From 1f6941ff436ca063b09bb04ff2d841b989b8712a Mon Sep 17 00:00:00 2001 From: Dries Peeters Date: Mon, 1 Dec 2025 08:15:30 +0100 Subject: [PATCH] Fix AUTH_METHOD=none and add comprehensive schema verification - Fix AUTH_METHOD=none: Read from Flask app config instead of Config class - Add comprehensive schema verification: Verify all SQLAlchemy models against database and auto-fix missing columns - Improve startup logging: Unified format with timestamps and log levels - Enhanced migration flow: Automatic schema verification after migrations Fixes authentication issue where password field showed even with AUTH_METHOD=none. Ensures all database columns from models exist, preventing missing column errors. Improves startup logging for better debugging and monitoring. --- app/routes/auth.py | 13 +- app/routes/scheduled_reports.py | 95 ----------- docker/entrypoint.py | 56 ++++++- docker/init-database-enhanced.py | 171 ++++++++++++++------ docker/start-fixed.py | 78 ++++------ scripts/fix_missing_columns.py | 260 +++++++++++++++++++++++++++++++ scripts/verify_and_fix_schema.py | 248 +++++++++++++++++++++++++++++ setup.py | 2 +- 8 files changed, 725 insertions(+), 198 deletions(-) create mode 100644 scripts/fix_missing_columns.py create mode 100644 scripts/verify_and_fix_schema.py diff --git a/app/routes/auth.py b/app/routes/auth.py index 7bf016a..378a922 100644 --- a/app/routes/auth.py +++ b/app/routes/auth.py @@ -54,13 +54,14 @@ def login(): if current_user.is_authenticated: return redirect(url_for("main.dashboard")) - # Get authentication method + # Get authentication method from Flask app config (reads from environment) try: - auth_method = (getattr(Config, "AUTH_METHOD", "local") or "local").strip().lower() + auth_method = (current_app.config.get("AUTH_METHOD", "local") or "local").strip().lower() except Exception: auth_method = "local" # Determine if password authentication is required + # 'none' = no password, 'local' = password required, 'oidc' = OIDC only, 'both' = OIDC + password requires_password = auth_method in ("local", "both") # If OIDC-only mode, redirect to OIDC login start @@ -295,7 +296,7 @@ def logout(): # Try OIDC end-session if enabled and configured try: - auth_method = (getattr(Config, "AUTH_METHOD", "local") or "local").strip().lower() + auth_method = (current_app.config.get("AUTH_METHOD", "local") or "local").strip().lower() except Exception: auth_method = "local" @@ -344,9 +345,9 @@ def profile(): @login_required def edit_profile(): """Edit user profile""" - # Get authentication method to determine if password fields should be shown + # Get authentication method from Flask app config (reads from environment) try: - auth_method = (getattr(Config, "AUTH_METHOD", "local") or "local").strip().lower() + auth_method = (current_app.config.get("AUTH_METHOD", "local") or "local").strip().lower() except Exception: auth_method = "local" @@ -553,7 +554,7 @@ def update_theme_preference(): def login_oidc(): """Start OIDC login using Authlib.""" try: - auth_method = (getattr(Config, "AUTH_METHOD", "local") or "local").strip().lower() + auth_method = (current_app.config.get("AUTH_METHOD", "local") or "local").strip().lower() except Exception: auth_method = "local" diff --git a/app/routes/scheduled_reports.py b/app/routes/scheduled_reports.py index 9136a90..1c0c102 100644 --- a/app/routes/scheduled_reports.py +++ b/app/routes/scheduled_reports.py @@ -201,98 +201,3 @@ def api_saved_views(): ] } ) - - -@scheduled_reports_bp.route("/api/reports/scheduled", methods=["POST"]) -@login_required 
-def api_create_scheduled():
-    """Create scheduled report via API"""
-    service = ScheduledReportService()
-    data = request.get_json()
-
-    saved_view_id = data.get("saved_view_id", type=int)
-    recipients = data.get("recipients", "").strip()
-    cadence = data.get("cadence", "").strip()
-    cron = data.get("cron", "").strip() or None
-    timezone = data.get("timezone", "").strip() or None
-
-    if not saved_view_id or not recipients or not cadence:
-        return jsonify({"success": False, "error": _("Please fill in all required fields.")}), 400
-
-    result = service.create_schedule(
-        saved_view_id=saved_view_id,
-        recipients=recipients,
-        cadence=cadence,
-        created_by=current_user.id,
-        cron=cron,
-        timezone=timezone,
-    )
-
-    if result["success"]:
-        return jsonify(
-            {
-                "success": True,
-                "schedule": {
-                    "id": result["schedule"].id,
-                    "saved_view_name": (
-                        result["schedule"].saved_view.name if result["schedule"].saved_view else "Unknown"
-                    ),
-                    "recipients": result["schedule"].recipients,
-                    "cadence": result["schedule"].cadence,
-                    "next_run_at": (
-                        result["schedule"].next_run_at.isoformat() if result["schedule"].next_run_at else None
-                    ),
-                },
-            }
-        )
-    else:
-        return jsonify({"success": False, "error": result["message"]}), 400
-
-
-@scheduled_reports_bp.route("/api/reports/scheduled/<int:schedule_id>/toggle", methods=["POST"])
-@login_required
-def api_toggle_scheduled(schedule_id):
-    """Toggle active status of scheduled report"""
-    from app import db
-
-    schedule = ReportEmailSchedule.query.get_or_404(schedule_id)
-
-    if schedule.created_by != current_user.id and not current_user.is_admin:
-        return jsonify({"success": False, "error": _("Permission denied")}), 403
-
-    schedule.active = not schedule.active
-    db.session.commit()
-
-    return jsonify({"success": True, "active": schedule.active})
-
-
-@scheduled_reports_bp.route("/api/reports/scheduled/<int:schedule_id>", methods=["DELETE"])
-@login_required
-def api_delete_scheduled(schedule_id):
-    """Delete scheduled report via API"""
-    service = ScheduledReportService()
-    result = service.delete_schedule(schedule_id, current_user.id)
-
-    if result["success"]:
-        return jsonify({"success": True})
-    else:
-        return jsonify({"success": False, "error": result["message"]}), 400
-
-
-@scheduled_reports_bp.route("/api/reports/saved-views", methods=["GET"])
-@login_required
-def api_saved_views():
-    """Get saved report views for current user"""
-    saved_views = SavedReportView.query.filter_by(owner_id=current_user.id).all()
-    return jsonify(
-        {
-            "saved_views": [
-                {
-                    "id": sv.id,
-                    "name": sv.name,
-                    "scope": sv.scope,
-                }
-                for sv in saved_views
-            ]
-        }
-    )
diff --git a/docker/entrypoint.py b/docker/entrypoint.py
index 1aaaa87..0a4577a 100644
--- a/docker/entrypoint.py
+++ b/docker/entrypoint.py
@@ -98,12 +98,66 @@ def run_migrations():
 
         # Try to apply any pending migrations
         result = subprocess.run(['flask', 'db', 'upgrade'],
-                          capture_output=True, text=True, timeout=60)
+                          capture_output=True, text=True, timeout=120)
 
         if result.returncode == 0:
             log("✓ Migrations applied successfully")
+
+            # Verify all columns from models exist and fix if missing
+            log("Verifying complete database schema against models...")
+            fix_result = subprocess.run(
+                ['python', '/app/scripts/verify_and_fix_schema.py'],
+                capture_output=True,
+                text=True,
+                timeout=180
+            )
+            if fix_result.returncode == 0:
+                # Print output to show what was fixed
+                if fix_result.stdout:
+                    for line in fix_result.stdout.strip().split('\n'):
+                        if line.strip() and not line.startswith('='):
+                            log(line)
+                log("✓ Database schema verified and fixed")
+ else: + log(f"⚠ Schema verification had issues: {fix_result.stderr}") + # Fallback to the simpler fix script + log("Attempting fallback column fix...") + fallback_result = subprocess.run( + ['python', '/app/scripts/fix_missing_columns.py'], + capture_output=True, + text=True, + timeout=60 + ) + if fallback_result.returncode == 0: + log("✓ Fallback fix completed") + return True else: log(f"⚠ Migration application failed: {result.stderr}") + # Try to fix missing columns even if migration failed + log("Attempting to fix missing columns...") + fix_result = subprocess.run( + ['python', '/app/scripts/verify_and_fix_schema.py'], + capture_output=True, + text=True, + timeout=180 + ) + if fix_result.returncode == 0: + if fix_result.stdout: + for line in fix_result.stdout.strip().split('\n'): + if line.strip() and not line.startswith('='): + log(line) + log("✓ Missing columns fixed") + else: + # Fallback to simpler script + log("Trying fallback fix...") + fallback_result = subprocess.run( + ['python', '/app/scripts/fix_missing_columns.py'], + capture_output=True, + text=True, + timeout=60 + ) + if fallback_result.returncode == 0: + log("✓ Fallback fix completed") return False else: log("No migrations directory found, initializing...") diff --git a/docker/init-database-enhanced.py b/docker/init-database-enhanced.py index 7a510b0..2d07165 100644 --- a/docker/init-database-enhanced.py +++ b/docker/init-database-enhanced.py @@ -11,23 +11,35 @@ import traceback from sqlalchemy import create_engine, text, inspect, MetaData from sqlalchemy.exc import OperationalError, ProgrammingError +def log(message, level="INFO"): + """Log message with timestamp and level""" + from datetime import datetime + timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + prefix = { + "INFO": "ℹ", + "SUCCESS": "✓", + "WARNING": "⚠", + "ERROR": "✗" + }.get(level, "•") + print(f"[{timestamp}] {prefix} {message}") + def wait_for_database(url, max_attempts=30, delay=2): """Wait for database to be ready""" - print(f"Waiting for database to be ready...") + log("Waiting for database connection...", "INFO") for attempt in range(max_attempts): try: engine = create_engine(url, pool_pre_ping=True) with engine.connect() as conn: conn.execute(text("SELECT 1")) - print("Database connection established successfully") + log("Database connection established", "SUCCESS") return engine except Exception as e: - print(f"Waiting for database... 
(attempt {attempt+1}/{max_attempts}): {e}") if attempt < max_attempts - 1: + log(f"Connection attempt {attempt+1}/{max_attempts} failed, retrying...", "WARNING") time.sleep(delay) else: - print("Database not ready after waiting, exiting...") + log(f"Database not ready after {max_attempts} attempts: {e}", "ERROR") sys.exit(1) return None @@ -230,7 +242,7 @@ def create_table_if_not_exists(engine, table_name, table_schema): with engine.connect() as conn: conn.execute(text(create_sql)) conn.commit() - print(f"✓ Created table: {table_name}") + log(f"Created table: {table_name}", "SUCCESS") return True else: # Check if table needs schema updates @@ -244,7 +256,7 @@ def create_table_if_not_exists(engine, table_name, table_schema): missing_columns.append((col_name, col_def)) if missing_columns: - print(f"⚠ Table {table_name} exists but missing columns: {[col[0] for col in missing_columns]}") + log(f"Table {table_name} exists but missing columns: {[col[0] for col in missing_columns]}", "WARNING") # Add missing columns with engine.connect() as conn: @@ -254,18 +266,18 @@ def create_table_if_not_exists(engine, table_name, table_schema): col_type_def = ' '.join(col_def.split()[1:]) alter_sql = f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_def}" conn.execute(text(alter_sql)) - print(f" ✓ Added column: {col_name}") + log(f" Added column: {col_name}", "SUCCESS") except Exception as e: - print(f" ⚠ Could not add column {col_name}: {e}") + log(f" Could not add column {col_name}: {e}", "WARNING") conn.commit() return True else: - print(f"✓ Table {table_name} exists with correct schema") + log(f"Table {table_name} exists with correct schema", "SUCCESS") return True except Exception as e: - print(f"✗ Error creating/updating table {table_name}: {e}") + log(f"Error creating/updating table {table_name}: {e}", "ERROR") return False def create_indexes(engine, table_name, table_schema): @@ -284,16 +296,16 @@ def create_indexes(engine, table_name, table_schema): conn.commit() if table_schema['indexes']: - print(f"✓ Indexes created for {table_name}") + log(f"Indexes created for {table_name}", "SUCCESS") return True except Exception as e: - print(f"⚠ Error creating indexes for {table_name}: {e}") + log(f"Error creating indexes for {table_name}: {e}", "WARNING") return True # Don't fail on index creation errors def create_triggers(engine): """Create triggers for automatic timestamp updates""" - print("Creating triggers...") + # Triggers are created silently try: with engine.connect() as conn: @@ -320,20 +332,20 @@ def create_triggers(engine): FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); """)) except Exception as e: - print(f" ⚠ Could not create trigger for {table}: {e}") + pass # Trigger creation errors are non-fatal conn.commit() - print("✓ Triggers created successfully") + log("Triggers created", "SUCCESS") return True except Exception as e: - print(f"⚠ Error creating triggers: {e}") + log(f"Error creating triggers: {e}", "WARNING") return True # Don't fail on trigger creation errors def insert_initial_data(engine): """Insert initial data""" - print("Inserting initial data...") + # Initial data insertion is logged separately try: # Check if initial data has already been seeded @@ -359,7 +371,7 @@ def insert_initial_data(engine): # Only insert default client and project on fresh installations if not installation_config.is_initial_data_seeded(): - print("Fresh installation detected, creating default client and project...") + # Fresh installation - default client/project will be created # Check 
if there are any existing projects result = conn.execute(text("SELECT COUNT(*) FROM projects;")) @@ -385,16 +397,16 @@ def insert_initial_data(engine): SELECT 1 FROM projects p WHERE p.name = 'General' ); """)) - print("✓ Default client and project created") + log("Default client and project created", "SUCCESS") # Mark initial data as seeded installation_config.mark_initial_data_seeded() - print("✓ Marked initial data as seeded") + log("Marked initial data as seeded", "SUCCESS") else: - print(f"Projects already exist ({project_count} found), marking initial data as seeded") + log(f"Projects already exist ({project_count} found), marking initial data as seeded", "INFO") installation_config.mark_initial_data_seeded() else: - print("Initial data already seeded previously, skipping default client/project creation") + log("Initial data already seeded previously, skipping default client/project creation", "INFO") # Insert default settings only if none exist (singleton semantics) conn.execute(text(""" @@ -418,18 +430,16 @@ def insert_initial_data(engine): conn.commit() - print("✓ Initial data inserted successfully") + log("Initial data inserted successfully", "SUCCESS") return True except Exception as e: - print(f"⚠ Error inserting initial data: {e}") - import traceback - print(f"Traceback: {traceback.format_exc()}") + log(f"Error inserting initial data: {e}", "WARNING") return True # Don't fail on data insertion errors def verify_database_schema(engine): """Verify that all required tables and columns exist""" - print("Verifying database schema...") + log("Running basic schema verification...", "INFO") try: inspector = inspect(engine) @@ -452,18 +462,18 @@ def verify_database_schema(engine): schema_issues.append(f"{table_name}: missing {missing_columns}") if missing_tables: - print(f"✗ Missing tables: {missing_tables}") + log(f"Missing tables: {missing_tables}", "ERROR") return False if schema_issues: - print(f"⚠ Schema issues found: {schema_issues}") + log(f"Schema issues found: {schema_issues}", "WARNING") return False - print("✓ Database schema verification passed") + log("Basic schema verification passed", "SUCCESS") return True except Exception as e: - print(f"✗ Error verifying schema: {e}") + log(f"Error verifying schema: {e}", "ERROR") return False def main(): @@ -471,37 +481,46 @@ def main(): url = os.getenv("DATABASE_URL", "") if not url.startswith("postgresql"): - print("No PostgreSQL database configured, skipping initialization") + log("No PostgreSQL database configured, skipping initialization", "WARNING") return - print(f"Database URL: {url}") + log(f"Database URL: {url[:50]}..." 
if len(url) > 50 else f"Database URL: {url}", "INFO") # Wait for database to be ready engine = wait_for_database(url) - print("=== Starting enhanced database initialization ===") + log("=" * 60, "INFO") + log("Starting database initialization", "INFO") + log("=" * 60, "INFO") # Get required schema required_schema = get_required_schema() + log(f"Found {len(required_schema)} core tables to verify", "INFO") # Create/update tables - print("\n--- Creating/updating tables ---") + log("Verifying core tables...", "INFO") + tables_updated = 0 for table_name, table_schema in required_schema.items(): - if not create_table_if_not_exists(engine, table_name, table_schema): - print(f"Failed to create/update table {table_name}") + if create_table_if_not_exists(engine, table_name, table_schema): + tables_updated += 1 + else: + log(f"Failed to create/update table {table_name}", "ERROR") sys.exit(1) + if tables_updated > 0: + log(f"Verified {tables_updated} core tables", "SUCCESS") + # Create indexes - print("\n--- Creating indexes ---") + log("Creating indexes...", "INFO") for table_name, table_schema in required_schema.items(): create_indexes(engine, table_name, table_schema) # Create triggers - print("\n--- Creating triggers ---") + log("Creating triggers...", "INFO") create_triggers(engine) # Run legacy migrations (projects.client -> projects.client_id) - print("\n--- Running legacy migrations ---") + log("Running legacy migrations...", "INFO") try: inspector = inspect(engine) project_columns = [c['name'] for c in inspector.get_columns('projects')] if 'projects' in inspector.get_table_names() else [] @@ -529,21 +548,73 @@ def main(): except Exception: pass conn.commit() - print("✓ Migrated legacy projects.client to client_id") + log("Migrated legacy projects.client to client_id", "SUCCESS") except Exception as e: - print(f"⚠ Legacy migration failed (non-fatal): {e}") + log(f"Legacy migration skipped (non-fatal): {e}", "WARNING") # Insert initial data - print("\n--- Inserting initial data ---") + log("Inserting initial data...", "INFO") insert_initial_data(engine) - # Verify everything was created correctly - print("\n--- Verifying database schema ---") - if verify_database_schema(engine): - print("\n✓ Enhanced database initialization completed successfully") - else: - print("\n✗ Database initialization failed - schema verification failed") - sys.exit(1) + # Verify everything was created correctly using comprehensive schema verification + log("=" * 60, "INFO") + log("Running comprehensive schema verification", "INFO") + log("Checking all SQLAlchemy models against database schema...", "INFO") + log("=" * 60, "INFO") + + # Run the comprehensive schema verification script + import subprocess + try: + result = subprocess.run( + [sys.executable, '/app/scripts/verify_and_fix_schema.py'], + capture_output=True, + text=True, + timeout=180, + env=os.environ.copy() + ) + + if result.returncode == 0: + # Print important output lines (skip separators and empty lines) + if result.stdout: + for line in result.stdout.strip().split('\n'): + line = line.strip() + if line and not line.startswith('=') and not line.startswith('TimeTracker'): + # Only show important messages + if any(keyword in line for keyword in ['Added column', 'already exists', 'Loaded', 'Tables checked', 'Columns added']): + log(f" {line}", "INFO") + log("Comprehensive schema verification completed", "SUCCESS") + log("=" * 60, "INFO") + log("Database initialization completed successfully", "SUCCESS") + log("=" * 60, "INFO") + else: + log("Comprehensive 
schema verification had issues", "WARNING") + if result.stderr: + log(f"Error details: {result.stderr[:200]}", "WARNING") + # Fall back to basic verification + log("Falling back to basic schema verification...", "WARNING") + if verify_database_schema(engine): + log("Basic schema verification passed", "SUCCESS") + log("Database initialization completed successfully", "SUCCESS") + else: + log("Database initialization failed - schema verification failed", "ERROR") + sys.exit(1) + except subprocess.TimeoutExpired: + log("Schema verification timed out, falling back to basic verification...", "WARNING") + if verify_database_schema(engine): + log("Basic schema verification passed", "SUCCESS") + log("Database initialization completed successfully", "SUCCESS") + else: + log("Database initialization failed - schema verification failed", "ERROR") + sys.exit(1) + except Exception as e: + log(f"Error running comprehensive schema verification: {e}", "WARNING") + log("Falling back to basic schema verification...", "WARNING") + if verify_database_schema(engine): + log("Basic schema verification passed", "SUCCESS") + log("Database initialization completed successfully", "SUCCESS") + else: + log("Database initialization failed - schema verification failed", "ERROR") + sys.exit(1) if __name__ == "__main__": main() diff --git a/docker/start-fixed.py b/docker/start-fixed.py index 404deb3..7fdaf06 100644 --- a/docker/start-fixed.py +++ b/docker/start-fixed.py @@ -14,7 +14,7 @@ from urllib.parse import urlparse def wait_for_database(): """Wait for database to be ready with proper connection testing""" - print("Waiting for database to be ready...") + # Logging is handled by main() # Get database URL from environment db_url = os.getenv('DATABASE_URL', 'postgresql+psycopg2://timetracker:timetracker@db:5432/timetracker') @@ -80,7 +80,6 @@ def wait_for_database(): while attempt < max_attempts: try: - print(f"Attempting database connection to {host}:{port}/{database} as {user}...") conn = psycopg2.connect( host=host, port=port, @@ -90,42 +89,29 @@ def wait_for_database(): connect_timeout=5 ) conn.close() - print("✓ Database connection successful!") return True except Exception as e: attempt += 1 - print(f"✗ Database connection attempt {attempt}/{max_attempts} failed: {e}") if attempt < max_attempts: - print("Waiting 2 seconds before retry...") time.sleep(2) - print("✗ Failed to connect to database after all attempts") return False def run_script(script_path, description): """Run a Python script with proper error handling""" - print(f"Running {description}...") try: result = subprocess.run( [sys.executable, script_path], check=True, - capture_output=True, + capture_output=False, # Let the script output directly text=True ) - print(f"✓ {description} completed successfully") - if result.stdout: - print(f"Output: {result.stdout}") return True except subprocess.CalledProcessError as e: - print(f"✗ {description} failed with exit code {e.returncode}") - if e.stdout: - print(f"stdout: {e.stdout}") - if e.stderr: - print(f"stderr: {e.stderr}") + log(f"{description} failed with exit code {e.returncode}", "ERROR") return False except Exception as e: - print(f"✗ Unexpected error running {description}: {e}") - traceback.print_exc() + log(f"Unexpected error running {description}: {e}", "ERROR") return False def display_network_info(): @@ -148,33 +134,45 @@ def display_network_info(): print(f"Working Directory: {os.getcwd()}") print("==========================") +def log(message, level="INFO"): + """Log message with timestamp and 
level""" + from datetime import datetime + timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + prefix = { + "INFO": "ℹ", + "SUCCESS": "✓", + "WARNING": "⚠", + "ERROR": "✗" + }.get(level, "•") + print(f"[{timestamp}] {prefix} {message}") + def main(): - print("=== Starting TimeTracker (Improved Python Mode) ===") - - # Display network information for debugging - display_network_info() + log("=" * 60, "INFO") + log("Starting TimeTracker Application", "INFO") + log("=" * 60, "INFO") # Set environment os.environ['FLASK_APP'] = 'app' os.chdir('/app') # Wait for database + log("Waiting for database connection...", "INFO") if not wait_for_database(): - print("Database is not available, exiting...") + log("Database is not available, exiting...", "ERROR") sys.exit(1) - # Run enhanced database initialization and migration (strict schema verification and auto-fix) - if not run_script('/app/docker/init-database-enhanced.py', 'Enhanced database initialization and migration'): - print("Enhanced database initialization failed, exiting...") + # Run enhanced database initialization and migration + log("Running database initialization...", "INFO") + if not run_script('/app/docker/init-database-enhanced.py', 'Database initialization'): + log("Database initialization failed, exiting...", "ERROR") sys.exit(1) - print("✓ Database initialization and migration completed successfully") + log("Database initialization completed", "SUCCESS") # Ensure default settings and admin user exist (idempotent) # Note: Database initialization is already handled by the migration system above # The flask init_db command is optional and may not be available in all environments try: - print("Ensuring default settings and admin user exist (flask init_db)...") result = subprocess.run( ['flask', 'init_db'], check=False, # Don't fail if command doesn't exist @@ -182,25 +180,15 @@ def main(): text=True, timeout=30 ) - if result.returncode == 0: - if result.stdout: - print(result.stdout.strip()) - else: - # Command failed or doesn't exist - this is OK, database is already initialized - if "No such command" not in result.stderr: - print(f"Warning: flask init_db returned exit code {result.returncode} (continuing)") - if result.stderr: - print(f"stderr: {result.stderr.strip()}") - except FileNotFoundError: - # Flask command not found - this is OK + if result.returncode != 0 and "No such command" not in (result.stderr or ""): + log("flask init_db returned non-zero exit code (continuing)", "WARNING") + except (FileNotFoundError, subprocess.TimeoutExpired, Exception): + # All errors are non-fatal - database is already initialized pass - except subprocess.TimeoutExpired: - print("Warning: flask init_db timed out (continuing)") - except Exception as e: - # Any other error - log but continue - print(f"Warning: could not execute flask init_db: {e}") - print("Starting application...") + log("=" * 60, "INFO") + log("Starting application server", "INFO") + log("=" * 60, "INFO") # Start gunicorn with access logs os.execv('/usr/local/bin/gunicorn', [ 'gunicorn', diff --git a/scripts/fix_missing_columns.py b/scripts/fix_missing_columns.py new file mode 100644 index 0000000..a595644 --- /dev/null +++ b/scripts/fix_missing_columns.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 +""" +Script to manually add missing columns to the users table. +This is a workaround for cases where migrations show as applied but columns are missing. 
+ +Usage: + python scripts/fix_missing_columns.py +""" + +import os +import sys +from sqlalchemy import create_engine, inspect, text +from sqlalchemy.exc import OperationalError + +# Add parent directory to path +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# Get database URL from environment +DATABASE_URL = os.getenv("DATABASE_URL", "postgresql+psycopg2://timetracker:timetracker@localhost:5432/timetracker") + + +def has_column(engine, table_name, column_name): + """Check if a column exists in a table""" + inspector = inspect(engine) + try: + columns = [col['name'] for col in inspector.get_columns(table_name)] + return column_name in columns + except Exception: + return False + + +def add_column_if_missing(engine, table_name, column_name, column_type, nullable=True, default=None): + """Add a column to a table if it doesn't exist""" + if has_column(engine, table_name, column_name): + print(f"✓ Column '{column_name}' already exists in '{table_name}'") + return False + + try: + with engine.connect() as conn: + # Build ALTER TABLE statement + alter_sql = f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_type}" + if not nullable: + alter_sql += " NOT NULL" + if default is not None: + alter_sql += f" DEFAULT {default}" + + conn.execute(text(alter_sql)) + conn.commit() + print(f"✓ Added column '{column_name}' to '{table_name}'") + return True + except Exception as e: + print(f"✗ Failed to add column '{column_name}' to '{table_name}': {e}") + return False + + +def main(): + """Main function to add missing columns""" + print("=" * 60) + print("TimeTracker - Fix Missing Database Columns") + print("=" * 60) + print() + + try: + engine = create_engine(DATABASE_URL) + + # Test connection + with engine.connect() as conn: + conn.execute(text("SELECT 1")) + print("✓ Database connection successful") + print() + + # Check if users table exists + inspector = inspect(engine) + if 'users' not in inspector.get_table_names(): + print("✗ 'users' table does not exist. 
Please run migrations first.") + return 1 + + print("Checking and adding missing columns to 'users' table...") + print() + + # List of columns that should exist based on the User model + # These are the columns that are commonly missing after migration issues + columns_to_add = [ + { + 'name': 'password_hash', + 'type': 'VARCHAR(255)', + 'nullable': True, + 'default': None + }, + { + 'name': 'password_change_required', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'false' + }, + { + 'name': 'email', + 'type': 'VARCHAR(200)', + 'nullable': True, + 'default': None + }, + { + 'name': 'full_name', + 'type': 'VARCHAR(200)', + 'nullable': True, + 'default': None + }, + { + 'name': 'theme_preference', + 'type': 'VARCHAR(10)', + 'nullable': True, + 'default': None + }, + { + 'name': 'preferred_language', + 'type': 'VARCHAR(8)', + 'nullable': True, + 'default': None + }, + { + 'name': 'oidc_sub', + 'type': 'VARCHAR(255)', + 'nullable': True, + 'default': None + }, + { + 'name': 'oidc_issuer', + 'type': 'VARCHAR(255)', + 'nullable': True, + 'default': None + }, + { + 'name': 'avatar_filename', + 'type': 'VARCHAR(255)', + 'nullable': True, + 'default': None + }, + { + 'name': 'email_notifications', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'true' + }, + { + 'name': 'notification_overdue_invoices', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'true' + }, + { + 'name': 'notification_task_assigned', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'true' + }, + { + 'name': 'notification_task_comments', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'true' + }, + { + 'name': 'notification_weekly_summary', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'false' + }, + { + 'name': 'timezone', + 'type': 'VARCHAR(50)', + 'nullable': True, + 'default': None + }, + { + 'name': 'date_format', + 'type': 'VARCHAR(20)', + 'nullable': False, + 'default': "'YYYY-MM-DD'" + }, + { + 'name': 'time_format', + 'type': 'VARCHAR(10)', + 'nullable': False, + 'default': "'24h'" + }, + { + 'name': 'week_start_day', + 'type': 'INTEGER', + 'nullable': False, + 'default': '1' + }, + { + 'name': 'time_rounding_enabled', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'true' + }, + { + 'name': 'time_rounding_minutes', + 'type': 'INTEGER', + 'nullable': False, + 'default': '1' + }, + { + 'name': 'time_rounding_method', + 'type': 'VARCHAR(10)', + 'nullable': False, + 'default': "'nearest'" + }, + { + 'name': 'standard_hours_per_day', + 'type': 'FLOAT', + 'nullable': False, + 'default': '8.0' + }, + { + 'name': 'client_portal_enabled', + 'type': 'BOOLEAN', + 'nullable': False, + 'default': 'false' + }, + { + 'name': 'client_id', + 'type': 'INTEGER', + 'nullable': True, + 'default': None + }, + ] + + added_count = 0 + for col in columns_to_add: + if add_column_if_missing( + engine, + 'users', + col['name'], + col['type'], + col['nullable'], + col['default'] + ): + added_count += 1 + + print() + print("=" * 60) + if added_count > 0: + print(f"✓ Successfully added {added_count} missing column(s)") + else: + print("✓ All columns already exist") + print("=" * 60) + + return 0 + + except Exception as e: + print(f"✗ Error: {e}") + import traceback + traceback.print_exc() + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/verify_and_fix_schema.py b/scripts/verify_and_fix_schema.py new file mode 100644 index 0000000..ec0b298 --- /dev/null +++ b/scripts/verify_and_fix_schema.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python3 +""" +Comprehensive schema 
verification and fix script.
+This script checks all SQLAlchemy models against the actual database schema
+and adds any missing columns based on the model definitions.
+
+Usage:
+    python scripts/verify_and_fix_schema.py
+"""
+
+import os
+import sys
+from sqlalchemy import create_engine, inspect, text, MetaData
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.schema import CreateTable
+from sqlalchemy.dialects import postgresql, sqlite
+
+# Add parent directory to path
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+def get_sqlalchemy_type(column_type, dialect):
+    """Convert SQLAlchemy column type to SQL string for the given dialect"""
+    if dialect == 'postgresql':
+        return str(column_type.compile(dialect=postgresql.dialect()))
+    else:
+        return str(column_type.compile(dialect=sqlite.dialect()))
+
+
+def get_column_default(column, dialect):
+    """Get the default value for a column as SQL string"""
+    # Handle server defaults (like server_default=text("CURRENT_TIMESTAMP")) first,
+    # so columns that define only a server_default are not skipped
+    if column.server_default is not None:
+        if hasattr(column.server_default, 'arg'):
+            default_text = str(column.server_default.arg)
+            # Remove quotes if it's a function call
+            if default_text.startswith("'") and default_text.endswith("'"):
+                default_text = default_text[1:-1]
+            return default_text
+
+    if column.default is None:
+        return None
+
+    # Handle Python defaults
+    if hasattr(column.default, 'arg'):
+        default_arg = column.default.arg
+        # Check bool before int/float: bool is a subclass of int,
+        # so the int branch would otherwise shadow it
+        if isinstance(default_arg, bool):
+            return 'true' if default_arg else 'false'
+        elif isinstance(default_arg, str):
+            # Escape single quotes in strings
+            escaped = default_arg.replace("'", "''")
+            return f"'{escaped}'"
+        elif isinstance(default_arg, (int, float)):
+            return str(default_arg)
+        elif callable(default_arg):
+            # For callable defaults like datetime.utcnow, skip the default;
+            # the database will handle NULL values
+            return None
+    elif hasattr(column.default, 'text'):
+        return column.default.text
+
+    return None
+
+
+def has_column(inspector, table_name, column_name):
+    """Check if a column exists in a table"""
+    try:
+        columns = [col['name'] for col in inspector.get_columns(table_name)]
+        return column_name in columns
+    except Exception:
+        return False
+
+
+def add_column_sql(table_name, column, dialect):
+    """Generate SQL to add a column"""
+    col_type = get_sqlalchemy_type(column.type, dialect)
+    nullable = "NULL" if column.nullable else "NOT NULL"
+    default = get_column_default(column, dialect)
+
+    # Build SQL statement
+    sql_parts = [f"ALTER TABLE {table_name} ADD COLUMN {column.name} {col_type}"]
+
+    # Add default if specified
+    if default is not None:
+        sql_parts.append(f"DEFAULT {default}")
+
+    # Add nullable constraint
+    sql_parts.append(nullable)
+
+    return " ".join(sql_parts)
+
+
+def verify_and_fix_table(engine, inspector, model_class, dialect):
+    """Verify and fix columns for a single table"""
+    table_name = model_class.__tablename__
+
+    # Check if table exists
+    if table_name not in inspector.get_table_names():
+        print(f"⚠ Table '{table_name}' does not exist (will be created by migrations)")
+        return 0
+
+    # Get expected columns from model
+    expected_columns = {}
+    for column in model_class.__table__.columns:
+        expected_columns[column.name] = column
+
+    # Get actual columns from database
+    try:
+        actual_columns = {col['name']: col for col in inspector.get_columns(table_name)}
+    except Exception as e:
+        print(f"✗ Error inspecting table '{table_name}': {e}")
+        return 0
+
+    # Find missing columns
+    missing_columns = []
+    for col_name, col_def in expected_columns.items():
+        if col_name not in actual_columns:
+            missing_columns.append((col_name, col_def))
+
+    if not missing_columns:
+        return 0
+
+    # Add missing columns
+    added_count = 0
+    with engine.begin() as conn:  # begin() gives automatic transaction management
+        for col_name, col_def in missing_columns:
+            try:
+                sql = add_column_sql(table_name, col_def, dialect)
+                conn.execute(text(sql))
+                print(f"  ✓ Added column '{col_name}' to '{table_name}'")
+                added_count += 1
+            except Exception as e:
+                # Log error but continue with other columns
+                error_msg = str(e)
+                # Don't fail on "column already exists" errors (race condition)
+                if "already exists" not in error_msg.lower() and "duplicate" not in error_msg.lower():
+                    print(f"  ✗ Failed to add column '{col_name}' to '{table_name}': {error_msg}")
+                else:
+                    print(f"  ⚠ Column '{col_name}' already exists in '{table_name}' (skipping)")
+
+    return added_count
+
+
+def main():
+    """Main function to verify and fix database schema"""
+    print("=" * 70)
+    print("TimeTracker - Comprehensive Schema Verification and Fix")
+    print("=" * 70)
+    print()
+
+    # Get database URL from environment
+    DATABASE_URL = os.getenv("DATABASE_URL", "postgresql+psycopg2://timetracker:timetracker@localhost:5432/timetracker")
+
+    try:
+        engine = create_engine(DATABASE_URL, pool_pre_ping=True)
+
+        # Test connection
+        with engine.connect() as conn:
+            conn.execute(text("SELECT 1"))
+        print("✓ Database connection successful")
+
+        # Detect database dialect
+        dialect = engine.dialect.name
+        print(f"✓ Database dialect: {dialect}")
+        print()
+
+        # Create inspector
+        inspector = inspect(engine)
+
+        # Import all models
+        print("Loading SQLAlchemy models...")
+        try:
+            from app import create_app
+            app = create_app()
+            with app.app_context():
+                # Import all models dynamically from app.models
+                from app.models import __all__ as model_names
+                import app.models as models_module
+
+                # Get all model classes
+                models = []
+                for name in model_names:
+                    try:
+                        model_class = getattr(models_module, name)
+                        if hasattr(model_class, '__tablename__'):
+                            models.append(model_class)
+                    except AttributeError:
+                        pass
+
+                # Also pick up any models that are not listed in __all__
+                for attr_name in dir(models_module):
+                    if not attr_name.startswith('_'):
+                        attr = getattr(models_module, attr_name)
+                        if (hasattr(attr, '__tablename__') and
+                                hasattr(attr, '__table__') and
+                                attr not in models):
+                            models.append(attr)
+
+                print(f"✓ Loaded {len(models)} model classes")
+                print()
+                print("Verifying database schema...")
+                print()
+
+                total_added = 0
+                tables_checked = 0
+
+                for model in models:
+                    if hasattr(model, '__tablename__'):
+                        tables_checked += 1
+                        added = verify_and_fix_table(engine, inspector, model, dialect)
+                        total_added += added
+                        if added > 0:
+                            print(f"  → Fixed {added} column(s) in '{model.__tablename__}'")
+
+                print()
+                print("=" * 70)
+                print("✓ Schema verification complete")
+                print(f"  - Tables checked: {tables_checked}")
+                print(f"  - Columns added: {total_added}")
+                print("=" * 70)
+
+                return 0  # Success even when columns had to be added
+
+        except ImportError as e:
+            print(f"✗ Error importing models: {e}")
+            print("  This script must be run from the application root directory")
+            return 1
+        except Exception as e:
+            print(f"✗ Error during schema verification: {e}")
+            import traceback
+            traceback.print_exc()
+            return 1
+
+    except OperationalError as e:
+        print(f"✗ Database connection error: {e}")
+        print("  Please check your DATABASE_URL environment variable")
+        return 1
+    except Exception as e:
+        print(f"✗ Unexpected error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/setup.py b/setup.py
index 882504a..738aae1 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='timetracker',
-    version='4.2.0',
+    version='4.2.1',
     packages=find_packages(),
     include_package_data=True,
     install_requires=[
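
Why the auth change works: the login routes now read AUTH_METHOD through current_app.config, which is populated from the environment when the app is created, instead of getattr() on the Config class (which presumably did not reflect the runtime environment, so the "local" fallback kept the password field visible). A minimal sketch of the pattern, assuming the app factory mirrors the env var into app.config; the create_app() body below is illustrative, not the application's real factory:

    import os
    from flask import Flask, current_app

    def create_app():
        app = Flask(__name__)
        # Assumption: the real factory copies AUTH_METHOD from the environment
        app.config["AUTH_METHOD"] = os.getenv("AUTH_METHOD", "local")
        return app

    app = create_app()
    with app.app_context():
        # Read pattern used in app/routes/auth.py after this patch
        auth_method = (current_app.config.get("AUTH_METHOD", "local") or "local").strip().lower()
        # 'none' = no password, 'local' = password required, 'oidc' = OIDC only, 'both' = OIDC + password
        requires_password = auth_method in ("local", "both")
        print(auth_method, requires_password)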