diff --git a/frontend/templates/index.html b/frontend/templates/index.html
index ecb86a20..eb3174c9 100644
--- a/frontend/templates/index.html
+++ b/frontend/templates/index.html
@@ -66,6 +66,9 @@
{% include 'components/notifications_section.html' %}
+
+ {% include 'components/backup_restore_section.html' %}
+
{% include 'components/prowlarr_section.html' %}
@@ -83,6 +86,8 @@
+
+
diff --git a/rules.md b/rules.md
index 953949f4..9d0ce6cf 100644
--- a/rules.md
+++ b/rules.md
@@ -31,14 +31,15 @@ docker logs huntarr
#### **Sidebar Types:**
1. **Main Sidebar** (`#sidebar`) - Default navigation (Home, Apps, Swaparr, Requestarr, etc.)
2. **Apps Sidebar** (`#apps-sidebar`) - App-specific navigation (Sonarr, Radarr, Lidarr, etc.)
-3. **Settings Sidebar** (`#settings-sidebar`) - Settings navigation (Main, Scheduling, Notifications, User)
+3. **Settings Sidebar** (`#settings-sidebar`) - Settings navigation (Main, Scheduling, Notifications, Backup/Restore, User)
4. **Requestarr Sidebar** (`#requestarr-sidebar`) - Requestarr navigation (Home, History)
#### **Navigation Logic Pattern:**
```javascript
// CRITICAL: All sidebar sections must be included in initialization logic
if (this.currentSection === 'settings' || this.currentSection === 'scheduling' ||
- this.currentSection === 'notifications' || this.currentSection === 'user') {
+ this.currentSection === 'notifications' || this.currentSection === 'backup-restore' ||
+ this.currentSection === 'user') {
this.showSettingsSidebar();
} else if (this.currentSection === 'requestarr' || this.currentSection === 'requestarr-history') {
this.showRequestarrSidebar();
@@ -52,6 +53,12 @@ if (this.currentSection === 'settings' || this.currentSection === 'scheduling' |
}
```
+#### **Apps Navigation Behavior:**
+- **Clicking "Apps"** → Automatically redirects to Sonarr (most commonly used app)
+- **Apps nav item stays highlighted** when viewing any app (Sonarr, Radarr, etc.)
+- **Apps sidebar provides navigation** between individual apps
+- **Direct URLs still work** (`#sonarr`, `#radarr`, etc.) for bookmarking specific apps
+
#### **Sidebar Switching Functions:**
```javascript
showMainSidebar: function() {
@@ -415,6 +422,263 @@ docker exec huntarr du -h /config/huntarr.db
5. **Unique constraints prevent duplicates** - app_name combinations are unique
6. **Transactions for consistency** - DatabaseManager handles commit/rollback
+## 💾 BACKUP & RESTORE SYSTEM
+
+### Overview
+**Huntarr includes a comprehensive backup and restore system for database protection and recovery.**
+
+**Key Features:**
+- **Automatic scheduled backups** based on user-defined frequency
+- **Manual backup creation** with progress tracking
+- **Multi-database backup** (huntarr.db, logs.db, manager.db)
+- **Database integrity verification** before and after operations
+- **Cross-platform backup storage** with environment detection
+- **Backup retention management** with automatic cleanup
+- **Safe restoration** with pre-restore backup creation
+
+### Backup Storage Structure
+```
+/config/backups/ # Docker environment
+├── scheduled_backup_2025-08-24_02-05-16/ # Backup folder (timestamp-based)
+│ ├── backup_info.json # Backup metadata
+│ ├── huntarr.db # Main database backup
+│ ├── logs.db # Logs database backup
+│ └── manager.db # Manager database backup (if exists)
+└── manual_backup_2025-08-24_14-30-00/ # Manual backup folder
+ ├── backup_info.json
+ └── [database files...]
+```
+
+**Storage Locations:**
+- **Docker:** `/config/backups/` (persistent volume)
+- **Windows:** `%APPDATA%/Huntarr/backups/`
+- **Local Development:** `{project_root}/data/backups/`
+
+### Backup System Architecture
+
+#### **BackupManager Class** (`src/routes/backup_routes.py`)
+```python
+class BackupManager:
+ def create_backup(self, backup_type='manual', name=None) # Creates verified backup
+ def restore_backup(self, backup_id) # Restores with pre-backup
+ def list_backups(self) # Lists available backups
+ def delete_backup(self, backup_id) # Deletes specific backup
+ def get_backup_settings(self) # Gets user settings
+ def save_backup_settings(self, settings) # Saves user settings
+ def _cleanup_old_backups(self) # Retention management
+```
+
+#### **BackupScheduler Class** (`src/routes/backup_routes.py`)
+```python
+class BackupScheduler:
+ def start(self) # Starts background scheduler thread
+ def stop(self) # Gracefully stops scheduler
+ def _should_create_backup(self) # Checks if backup is due
+ def _scheduler_loop(self) # Main scheduling loop (hourly checks)
+```
+
+### API Endpoints
+```python
+# Backup Settings
+GET/POST /api/backup/settings # Get/set backup frequency & retention
+
+# Backup Operations
+POST /api/backup/create # Create manual backup
+GET /api/backup/list # List available backups
+POST /api/backup/restore # Restore from backup
+POST /api/backup/delete # Delete specific backup
+GET /api/backup/next-scheduled # Get next scheduled backup time
+
+# Destructive Operations
+POST /api/backup/delete-database # Delete current database (testing/reset)
+```
+
+### Frontend Integration
+
+#### **Settings Navigation** (`frontend/templates/components/sidebar.html`)
+```html
+<!-- NOTE: reconstructed example (original markup was lost); verify against sidebar.html -->
+<a class="nav-item" id="settingsBackupRestoreNav" href="#backup-restore" data-section="backup-restore">
+    <i class="fas fa-database"></i>
+    <span>
+        Backup / Restore
+    </span>
+</a>
+```
+
+#### **Section Handling** (`frontend/static/js/new-main.js`)
+```javascript
+// CRITICAL: backup-restore must be included in settings sections
+if (this.currentSection === 'settings' || this.currentSection === 'scheduling' ||
+ this.currentSection === 'notifications' || this.currentSection === 'backup-restore' ||
+ this.currentSection === 'user') {
+ this.showSettingsSidebar();
+}
+
+// Section initialization
+} else if (section === 'backup-restore' && document.getElementById('backupRestoreSection')) {
+ document.getElementById('backupRestoreSection').classList.add('active');
+ document.getElementById('backupRestoreSection').style.display = 'block';
+ if (document.getElementById('settingsBackupRestoreNav'))
+ document.getElementById('settingsBackupRestoreNav').classList.add('active');
+ this.currentSection = 'backup-restore';
+ this.showSettingsSidebar();
+ this.initializeBackupRestore();
+}
+```
+
+#### **JavaScript Module** (`frontend/static/js/backup-restore.js`)
+```javascript
+const BackupRestore = {
+ initialize: function() // Main initialization
+ createManualBackup: function() // Manual backup with progress
+ restoreBackup: function() // Restore with confirmations
+ deleteDatabase: function() // Destructive operation with safeguards
+ loadBackupList: function() // Refresh backup list
+ validateRestoreConfirmation: function() // "RESTORE" confirmation
+ validateDeleteConfirmation: function() // "huntarr" confirmation
+}
+```
+
+### Safety & Security Features
+
+#### **Multi-Level Confirmations**
+1. **Restore Operations:**
+ - Select backup from dropdown
+ - Type "RESTORE" to enable button
+ - Final browser confirmation dialog
+ - Pre-restore backup creation
+
+2. **Database Deletion:**
+ - Type "huntarr" to enable deletion
+ - Final browser confirmation dialog
+ - Multiple warning messages
+
+#### **Data Integrity**
+```python
+def _verify_database_integrity(self, db_path):
+ """Verify database integrity using SQLite PRAGMA"""
+ conn = sqlite3.connect(db_path)
+ result = conn.execute("PRAGMA integrity_check").fetchone()
+ return result and result[0] == "ok"
+```
+
+#### **Backup Verification Process**
+1. **Pre-backup:** Force WAL checkpoint
+2. **During backup:** Copy all database files
+3. **Post-backup:** Verify each backup file integrity
+4. **Cleanup:** Remove corrupted backups automatically
+
+### Configuration & Settings
+
+#### **Default Settings**
+```python
+backup_settings = {
+ 'frequency': 3, # Days between automatic backups
+ 'retention': 3 # Number of backups to keep
+}
+```
+
+#### **Settings Storage** (Database)
+```sql
+-- Stored in general_settings table
+INSERT INTO general_settings (setting_key, setting_value) VALUES
+('backup_frequency', '3'),
+('backup_retention', '3'),
+('last_backup_time', '2025-08-24T02:05:16');
+```
+
+### Troubleshooting Backup Issues
+
+#### **Common Problems & Solutions**
+
+1. **Backup Directory Not Writable**
+ ```python
+ # System falls back to alternative locations
+ # Windows: Documents/Huntarr/ if AppData fails
+ # Check logs for "Database directory not writable"
+ ```
+
+2. **Database Corruption During Backup**
+ ```bash
+ # Check integrity manually
+ docker exec huntarr sqlite3 /config/huntarr.db "PRAGMA integrity_check"
+
+ # Backup system auto-detects and handles corruption
+ # Creates corrupted_backup_[timestamp].db for recovery
+ ```
+
+3. **Scheduler Not Running**
+ ```bash
+ # Check logs for scheduler startup
+ docker logs huntarr | grep -i "backup scheduler"
+
+ # Should see: "Backup scheduler started"
+ ```
+
+4. **Restore Failures**
+ ```python
+ # System creates pre-restore backup automatically
+ # Check /config/backups/ for pre_restore_backup_[timestamp] folders
+ # Original data preserved even if restore fails
+ ```
+
+#### **Debugging Commands**
+```bash
+# Check backup directory
+docker exec huntarr ls -la /config/backups/
+
+# Verify backup integrity
+docker exec huntarr sqlite3 /config/backups/[backup_folder]/huntarr.db "PRAGMA integrity_check"
+
+# Check backup settings
+docker exec huntarr sqlite3 /config/huntarr.db "SELECT * FROM general_settings WHERE setting_key LIKE 'backup_%'"
+
+# Monitor backup scheduler
+docker logs huntarr | grep -i backup
+```
+
+### Development Guidelines for Backup System
+
+#### **Adding New Database Files**
+```python
+def _get_all_database_paths(self):
+ """Update this method when adding new database files"""
+ databases = {
+ 'huntarr': str(main_db_path),
+ 'logs': str(logs_db_path),
+ 'manager': str(manager_db_path),
+ 'new_db': str(new_db_path) # Add new databases here
+ }
+ return databases
+```
+
+#### **Backup Metadata Format**
+```json
+{
+ "id": "backup_name",
+ "name": "backup_name",
+ "type": "manual|scheduled|pre-restore",
+ "timestamp": "2025-08-24T02:05:16",
+ "databases": [
+ {
+ "name": "huntarr",
+ "size": 249856,
+ "path": "/config/backups/backup_name/huntarr.db"
+ }
+ ],
+ "size": 1331200
+}
+```
+
+#### **Critical Requirements**
+1. **Always verify backup integrity** before considering backup complete
+2. **Create pre-restore backup** before any restore operation
+3. **Use cross-platform paths** - never hardcode `/config/`
+4. **Handle backup corruption gracefully** with user notifications
+5. **Implement proper cleanup** based on retention settings
+6. **Provide clear user feedback** for all operations
+
## 🔧 DEVELOPMENT WORKFLOW
### Before Any Changes
@@ -438,6 +702,11 @@ docker exec huntarr du -h /config/huntarr.db
- [ ] Test subpath scenarios (`domain.com/huntarr/`)
- [ ] Check browser console for errors
- [ ] **NEW:** Verify database persistence across container restarts
+- [ ] **NEW:** Test backup/restore functionality if modified:
+ - [ ] Verify backup creation and integrity
+ - [ ] Test backup scheduler startup
+ - [ ] Verify backup directory creation (`/config/backups/`)
+ - [ ] Test restore confirmation workflow
- [ ] Get user approval before committing
### Proactive Violation Scanning
@@ -489,15 +758,20 @@ grep -r "fetch('/api/" frontend/ --include="*.js"
- `/src/primary/cycle_tracker.py` - Timer functionality
- `/src/primary/utils/logger.py` - Logging configuration
- `/src/primary/utils/database.py` - **NEW:** DatabaseManager class (replaces settings_manager.py)
+- `/src/routes/backup_routes.py` - **NEW:** Backup & Restore API endpoints and BackupManager
### Frontend Core
- `/frontend/static/js/new-main.js` - Main UI logic
- `/frontend/static/js/settings_forms.js` - Settings forms
+- `/frontend/static/js/backup-restore.js` - **NEW:** Backup & Restore functionality
- `/frontend/templates/components/` - UI components
+- `/frontend/templates/components/backup_restore_section.html` - **NEW:** Backup & Restore UI
### Database & Storage
- `/config/huntarr.db` - **Docker:** Main database file (persistent)
- `./data/huntarr.db` - **Local:** Main database file (development)
+- `/config/backups/` - **Docker:** Backup storage directory (persistent)
+- `./data/backups/` - **Local:** Backup storage directory (development)
- `/src/primary/utils/database.py` - DatabaseManager with auto-detection
- **REMOVED:** All JSON files (settings.json, stateful.json, etc.)
@@ -702,6 +976,63 @@ grep -r 'id="[^"]*"' docs/apps/ | grep -o 'id="[^"]*"' | sort | uniq
- Updated `src/primary/apps/radarr/upgrade.py` to check release dates
- Both missing and upgrade searches now respect `skip_future_releases` and `process_no_release_dates`
- Documentation updated to clarify behavior affects both search types
+
+### Apps Navigation Redesign (2024-12)
+**Issue:** Apps section showed a dashboard that users had to navigate through to reach individual app settings
+**User Feedback:** "Instead of an apps dashboard; make it where when a user clicks apps, it goes to the sonarr apps being selected by default"
+**Solution:** Direct navigation to Sonarr when clicking Apps, eliminating the dashboard step
+
+**Implementation Changes:**
+
+1. **Modified Apps Section Navigation** (`frontend/static/js/new-main.js`):
+ ```javascript
+ // OLD: Showed apps dashboard
+ } else if (section === 'apps') {
+ document.getElementById('appsSection').classList.add('active');
+ // ... dashboard logic
+
+ // NEW: Direct redirect to Sonarr
+ } else if (section === 'apps') {
+ console.log('[huntarrUI] Apps section requested - redirecting to Sonarr by default');
+ this.switchSection('sonarr');
+ window.location.hash = '#sonarr';
+ return;
+ ```
+
+2. **Updated Navigation Highlighting** (`frontend/templates/components/sidebar.html`):
+ ```javascript
+ // Keep "Apps" nav item active when viewing Sonarr
+ } else if (currentHash === '#apps' || currentHash === '#sonarr') {
+ selector = '#appsNav';
+ ```
+
+3. **Preserved Apps Sidebar Functionality**:
+ - Apps sidebar still provides navigation between all apps (Sonarr, Radarr, Lidarr, etc.)
+ - Return button allows going back to main navigation
+ - Individual app sections remain fully functional
+
+**User Experience Improvements:**
+- **Faster Access**: Click "Apps" → Immediately see Sonarr settings (most commonly used)
+- **Intuitive Navigation**: Apps nav item stays highlighted when viewing any app
+- **Preserved Functionality**: All apps still accessible via Apps sidebar
+- **Consistent Behavior**: Maintains existing sidebar switching patterns
+
+**Navigation Flow:**
+```
+Main Sidebar: Apps → Sonarr (default)
+Apps Sidebar: Sonarr ↔ Radarr ↔ Lidarr ↔ Readarr ↔ Whisparr V2 ↔ Whisparr V3 ↔ Prowlarr
+```
+
+**Files Modified:**
+- `frontend/static/js/new-main.js` - Apps section redirect logic
+- `frontend/templates/components/sidebar.html` - Navigation highlighting logic
+- Removed dashboard functionality from `frontend/templates/components/apps_section.html`
+
+**Benefits:**
+- Eliminates unnecessary dashboard step
+- Provides direct access to most commonly used app (Sonarr)
+- Maintains all existing functionality through sidebar navigation
+- Improves user workflow efficiency
- Frontend info icons fixed to use GitHub documentation links
**User Benefit:** Consistent behavior - no more unexpected future movie upgrades
diff --git a/src/primary/web_server.py b/src/primary/web_server.py
index 25122e1f..10b81164 100644
--- a/src/primary/web_server.py
+++ b/src/primary/web_server.py
@@ -50,6 +50,9 @@ from src.primary.routes.history_routes import history_blueprint
# Import scheduler blueprint
from src.primary.routes.scheduler_routes import scheduler_api
+# Import backup blueprint
+from src.routes.backup_routes import backup_bp
+
# Import log routes blueprint
from src.primary.routes.log_routes import log_routes_bp
@@ -277,6 +280,7 @@ app.register_blueprint(stateful_api, url_prefix='/api/stateful')
app.register_blueprint(history_blueprint, url_prefix='/api/hunt-manager')
app.register_blueprint(scheduler_api)
app.register_blueprint(log_routes_bp)
+app.register_blueprint(backup_bp)
# Register the authentication check to run before requests
app.before_request(authenticate_request)
diff --git a/src/routes/backup_routes.py b/src/routes/backup_routes.py
new file mode 100644
index 00000000..000b3ebc
--- /dev/null
+++ b/src/routes/backup_routes.py
@@ -0,0 +1,629 @@
+"""
+Backup and Restore API routes for Huntarr
+Handles database backup creation, restoration, and management
+"""
+
+import os
+import json
+import shutil
+import sqlite3
+import time
+import threading
+from datetime import datetime, timedelta
+from pathlib import Path
+from flask import Blueprint, request, jsonify
+from src.primary.utils.database import get_database
+from src.primary.routes.common import get_user_for_request
+import logging
+
+logger = logging.getLogger(__name__)
+
+backup_bp = Blueprint('backup', __name__)
+
+class BackupScheduler:
+ """Handles automatic backup scheduling"""
+
+ def __init__(self, backup_manager):
+ self.backup_manager = backup_manager
+ self.scheduler_thread = None
+ self.stop_event = threading.Event()
+ self.running = False
+
+ def start(self):
+ """Start the backup scheduler"""
+ if self.running:
+ return
+
+ self.stop_event.clear()
+ self.scheduler_thread = threading.Thread(target=self._scheduler_loop, daemon=True)
+ self.scheduler_thread.start()
+ self.running = True
+ logger.info("Backup scheduler started")
+
+ def stop(self):
+ """Stop the backup scheduler"""
+ if not self.running:
+ return
+
+ self.stop_event.set()
+ if self.scheduler_thread:
+ self.scheduler_thread.join(timeout=5)
+ self.running = False
+ logger.info("Backup scheduler stopped")
+
+ def _scheduler_loop(self):
+ """Main scheduler loop"""
+ while not self.stop_event.is_set():
+ try:
+ if self._should_create_backup():
+ logger.info("Creating scheduled backup")
+ backup_info = self.backup_manager.create_backup('scheduled', None)
+
+ # Update last backup time
+ self.backup_manager.db.set_general_setting('last_backup_time', backup_info['timestamp'])
+ logger.info(f"Scheduled backup created: {backup_info['name']}")
+
+ # Check every hour
+ self.stop_event.wait(3600)
+
+ except Exception as e:
+ logger.error(f"Error in backup scheduler: {e}")
+ # Wait before retrying
+ self.stop_event.wait(300) # 5 minutes
+
+ def _should_create_backup(self):
+ """Check if a backup should be created"""
+ try:
+ settings = self.backup_manager.get_backup_settings()
+ frequency_days = settings['frequency']
+
+ last_backup_time = self.backup_manager.db.get_general_setting('last_backup_time')
+
+ if not last_backup_time:
+ # No previous backup, create one
+ return True
+
+ last_backup = datetime.fromisoformat(last_backup_time)
+ next_backup = last_backup + timedelta(days=frequency_days)
+
+ return datetime.now() >= next_backup
+
+ except Exception as e:
+ logger.error(f"Error checking backup schedule: {e}")
+ return False
+
+# Global backup scheduler instance
+backup_scheduler = None
+
+class BackupManager:
+ """Manages database backups and restoration"""
+
+ def __init__(self):
+ self.db = get_database()
+ self.backup_dir = self._get_backup_directory()
+ self.ensure_backup_directory()
+
+ def _get_backup_directory(self):
+ """Get the backup directory path based on environment"""
+ # Check if running in Docker (config directory exists)
+ config_dir = Path("/config")
+ if config_dir.exists() and config_dir.is_dir():
+ return config_dir / "backups"
+
+ # Check Windows AppData
+ import platform
+ if platform.system() == "Windows":
+ appdata = os.environ.get("APPDATA", os.path.expanduser("~"))
+ windows_config_dir = Path(appdata) / "Huntarr"
+ return windows_config_dir / "backups"
+
+ # For local development, use data directory in project root
+ project_root = Path(__file__).parent.parent.parent
+ data_dir = project_root / "data"
+ return data_dir / "backups"
+
+ def ensure_backup_directory(self):
+ """Ensure backup directory exists"""
+ try:
+ self.backup_dir.mkdir(parents=True, exist_ok=True)
+ logger.info(f"Backup directory ensured: {self.backup_dir}")
+ except Exception as e:
+ logger.error(f"Failed to create backup directory: {e}")
+ raise
+
+ def get_backup_settings(self):
+ """Get backup settings from database"""
+ try:
+ frequency = self.db.get_general_setting('backup_frequency', 3)
+ retention = self.db.get_general_setting('backup_retention', 3)
+
+ return {
+ 'frequency': int(frequency),
+ 'retention': int(retention)
+ }
+ except Exception as e:
+ logger.error(f"Error getting backup settings: {e}")
+ return {'frequency': 3, 'retention': 3}
+
+ def save_backup_settings(self, settings):
+ """Save backup settings to database"""
+ try:
+ self.db.set_general_setting('backup_frequency', settings.get('frequency', 3))
+ self.db.set_general_setting('backup_retention', settings.get('retention', 3))
+ logger.info(f"Backup settings saved: {settings}")
+ return True
+ except Exception as e:
+ logger.error(f"Error saving backup settings: {e}")
+ return False
+
+ def create_backup(self, backup_type='manual', name=None):
+ """Create a backup of all databases"""
+ try:
+ # Generate backup name if not provided
+ if not name:
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ name = f"{backup_type}_backup_{timestamp}"
+
+ # Create backup folder with timestamp
+ backup_folder = self.backup_dir / name
+ backup_folder.mkdir(parents=True, exist_ok=True)
+
+ # Get all database paths
+ databases = self._get_all_database_paths()
+
+ backup_info = {
+ 'id': name,
+ 'name': name,
+ 'type': backup_type,
+ 'timestamp': datetime.now().isoformat(),
+ 'databases': [],
+ 'size': 0
+ }
+
+ # Backup each database
+ for db_name, db_path in databases.items():
+ if Path(db_path).exists():
+ backup_db_path = backup_folder / f"{db_name}.db"
+
+ # Force WAL checkpoint before backup
+ try:
+ conn = sqlite3.connect(db_path)
+ conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
+ conn.close()
+ except Exception as e:
+ logger.warning(f"Could not checkpoint {db_name}: {e}")
+
+ # Copy database file
+ shutil.copy2(db_path, backup_db_path)
+
+ # Verify backup integrity
+ if self._verify_database_integrity(backup_db_path):
+ db_size = backup_db_path.stat().st_size
+ backup_info['databases'].append({
+ 'name': db_name,
+ 'size': db_size,
+ 'path': str(backup_db_path)
+ })
+ backup_info['size'] += db_size
+ logger.info(f"Backed up {db_name} ({db_size} bytes)")
+ else:
+ logger.error(f"Backup verification failed for {db_name}")
+ backup_db_path.unlink(missing_ok=True)
+ raise Exception(f"Backup verification failed for {db_name}")
+
+ # Save backup metadata
+ metadata_path = backup_folder / "backup_info.json"
+ with open(metadata_path, 'w') as f:
+ json.dump(backup_info, f, indent=2)
+
+ # Clean up old backups based on retention policy
+ self._cleanup_old_backups()
+
+ logger.info(f"Backup created successfully: {name} ({backup_info['size']} bytes)")
+ return backup_info
+
+ except Exception as e:
+ logger.error(f"Error creating backup: {e}")
+ # Clean up failed backup
+ if 'backup_folder' in locals() and backup_folder.exists():
+ shutil.rmtree(backup_folder, ignore_errors=True)
+ raise
+
+ def _get_all_database_paths(self):
+ """Get paths to all Huntarr databases"""
+ databases = {}
+
+ # Main database
+ main_db_path = self.db.db_path
+ databases['huntarr'] = str(main_db_path)
+
+ # Logs database (if exists)
+ logs_db_path = main_db_path.parent / "logs.db"
+ if logs_db_path.exists():
+ databases['logs'] = str(logs_db_path)
+
+ # Manager database (if exists)
+ manager_db_path = main_db_path.parent / "manager.db"
+ if manager_db_path.exists():
+ databases['manager'] = str(manager_db_path)
+
+ return databases
+
+ def _verify_database_integrity(self, db_path):
+ """Verify database integrity"""
+ try:
+ conn = sqlite3.connect(db_path)
+ result = conn.execute("PRAGMA integrity_check").fetchone()
+ conn.close()
+ return result and result[0] == "ok"
+ except Exception as e:
+ logger.error(f"Database integrity check failed: {e}")
+ return False
+
+ def list_backups(self):
+ """List all available backups"""
+ try:
+ backups = []
+
+ if not self.backup_dir.exists():
+ return backups
+
+ for backup_folder in self.backup_dir.iterdir():
+ if backup_folder.is_dir():
+ metadata_path = backup_folder / "backup_info.json"
+
+ if metadata_path.exists():
+ try:
+ with open(metadata_path, 'r') as f:
+ backup_info = json.load(f)
+ backups.append(backup_info)
+ except Exception as e:
+ logger.warning(f"Could not read backup metadata for {backup_folder.name}: {e}")
+ # Create basic info from folder
+ backups.append({
+ 'id': backup_folder.name,
+ 'name': backup_folder.name,
+ 'type': 'unknown',
+ 'timestamp': datetime.fromtimestamp(backup_folder.stat().st_mtime).isoformat(),
+ 'size': sum(f.stat().st_size for f in backup_folder.rglob('*.db') if f.is_file())
+ })
+
+ # Sort by timestamp (newest first)
+ backups.sort(key=lambda x: x['timestamp'], reverse=True)
+ return backups
+
+ except Exception as e:
+ logger.error(f"Error listing backups: {e}")
+ return []
+
+ def restore_backup(self, backup_id):
+ """Restore a backup"""
+ try:
+ backup_folder = self.backup_dir / backup_id
+
+ if not backup_folder.exists():
+ raise Exception(f"Backup not found: {backup_id}")
+
+ # Load backup metadata
+ metadata_path = backup_folder / "backup_info.json"
+ if metadata_path.exists():
+ with open(metadata_path, 'r') as f:
+ backup_info = json.load(f)
+ else:
+ raise Exception("Backup metadata not found")
+
+ # Get current database paths
+ databases = self._get_all_database_paths()
+
+ # Create backup of current databases before restore
+ current_backup_name = f"pre_restore_backup_{int(time.time())}"
+ logger.info(f"Creating backup of current databases: {current_backup_name}")
+ self.create_backup('pre-restore', current_backup_name)
+
+ # Restore each database
+ restored_databases = []
+ for db_info in backup_info.get('databases', []):
+ db_name = db_info['name']
+ backup_db_path = Path(db_info['path'])
+
+ if db_name in databases and backup_db_path.exists():
+ current_db_path = Path(databases[db_name])
+
+ # Verify backup database integrity
+ if not self._verify_database_integrity(backup_db_path):
+ raise Exception(f"Backup database {db_name} is corrupted")
+
+ # Stop any connections to the database
+ if hasattr(self.db, 'close_connections'):
+ self.db.close_connections()
+
+ # Replace current database with backup
+ if current_db_path.exists():
+ current_db_path.unlink()
+
+ shutil.copy2(backup_db_path, current_db_path)
+
+ # Verify restored database
+ if self._verify_database_integrity(current_db_path):
+ restored_databases.append(db_name)
+ logger.info(f"Restored database: {db_name}")
+ else:
+ raise Exception(f"Restored database {db_name} failed integrity check")
+
+ logger.info(f"Backup restored successfully: {backup_id}")
+ return {
+ 'backup_id': backup_id,
+ 'restored_databases': restored_databases,
+ 'timestamp': datetime.now().isoformat()
+ }
+
+ except Exception as e:
+ logger.error(f"Error restoring backup: {e}")
+ raise
+
+ def delete_backup(self, backup_id):
+ """Delete a backup"""
+ try:
+ backup_folder = self.backup_dir / backup_id
+
+ if not backup_folder.exists():
+ raise Exception(f"Backup not found: {backup_id}")
+
+ shutil.rmtree(backup_folder)
+ logger.info(f"Backup deleted: {backup_id}")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error deleting backup: {e}")
+ raise
+
+ def delete_database(self):
+ """Delete the current database (destructive operation)"""
+ try:
+ databases = self._get_all_database_paths()
+ deleted_databases = []
+
+ for db_name, db_path in databases.items():
+ db_file = Path(db_path)
+ if db_file.exists():
+ db_file.unlink()
+ deleted_databases.append(db_name)
+ logger.warning(f"Deleted database: {db_name}")
+
+ logger.warning(f"Database deletion completed: {deleted_databases}")
+ return deleted_databases
+
+ except Exception as e:
+ logger.error(f"Error deleting database: {e}")
+ raise
+
+ def _cleanup_old_backups(self):
+ """Clean up old backups based on retention policy"""
+ try:
+ settings = self.get_backup_settings()
+ retention_count = settings['retention']
+
+ backups = self.list_backups()
+
+ # Keep only the most recent backups
+ if len(backups) > retention_count:
+ backups_to_delete = backups[retention_count:]
+
+ for backup in backups_to_delete:
+ try:
+ self.delete_backup(backup['id'])
+ logger.info(f"Cleaned up old backup: {backup['id']}")
+ except Exception as e:
+ logger.warning(f"Failed to clean up backup {backup['id']}: {e}")
+
+ except Exception as e:
+ logger.error(f"Error during backup cleanup: {e}")
+
+ def get_next_scheduled_backup(self):
+ """Get the next scheduled backup time"""
+ try:
+ settings = self.get_backup_settings()
+ frequency_days = settings['frequency']
+
+ # Get the last backup time
+ last_backup_time = self.db.get_general_setting('last_backup_time')
+
+ if last_backup_time:
+ last_backup = datetime.fromisoformat(last_backup_time)
+ next_backup = last_backup + timedelta(days=frequency_days)
+ else:
+ # If no previous backup, schedule for tomorrow
+ next_backup = datetime.now() + timedelta(days=1)
+
+ return next_backup.isoformat()
+
+ except Exception as e:
+ logger.error(f"Error calculating next backup time: {e}")
+ return None
+
+# Initialize backup manager and scheduler
+backup_manager = BackupManager()
+backup_scheduler = BackupScheduler(backup_manager)
+
+# Start the backup scheduler
+backup_scheduler.start()
+
+@backup_bp.route('/api/backup/settings', methods=['GET', 'POST'])
+def backup_settings():
+ """Get or set backup settings"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ if request.method == 'GET':
+ settings = backup_manager.get_backup_settings()
+ return jsonify({
+ 'success': True,
+ 'settings': settings
+ })
+
+ elif request.method == 'POST':
+ data = request.get_json() or {}
+
+ # Validate settings
+ frequency = int(data.get('frequency', 3))
+ retention = int(data.get('retention', 3))
+
+ if frequency < 1 or frequency > 30:
+ return jsonify({"success": False, "error": "Frequency must be between 1 and 30 days"}), 400
+
+ if retention < 1 or retention > 10:
+ return jsonify({"success": False, "error": "Retention must be between 1 and 10 backups"}), 400
+
+ settings = {
+ 'frequency': frequency,
+ 'retention': retention
+ }
+
+ if backup_manager.save_backup_settings(settings):
+ return jsonify({
+ 'success': True,
+ 'settings': settings
+ })
+ else:
+ return jsonify({"success": False, "error": "Failed to save settings"}), 500
+
+ except Exception as e:
+ logger.error(f"Error in backup settings: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/create', methods=['POST'])
+def create_backup():
+ """Create a manual backup"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ data = request.get_json() or {}
+ backup_type = data.get('type', 'manual')
+ backup_name = data.get('name')
+
+ backup_info = backup_manager.create_backup(backup_type, backup_name)
+
+ # Update last backup time
+ backup_manager.db.set_general_setting('last_backup_time', backup_info['timestamp'])
+
+ return jsonify({
+ 'success': True,
+ 'backup_name': backup_info['name'],
+ 'backup_size': backup_info['size'],
+ 'timestamp': backup_info['timestamp']
+ })
+
+ except Exception as e:
+ logger.error(f"Error creating backup: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/list', methods=['GET'])
+def list_backups():
+ """List all available backups"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ backups = backup_manager.list_backups()
+ return jsonify({
+ 'success': True,
+ 'backups': backups
+ })
+
+ except Exception as e:
+ logger.error(f"Error listing backups: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/restore', methods=['POST'])
+def restore_backup():
+ """Restore a backup"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ data = request.get_json() or {}
+ backup_id = data.get('backup_id')
+
+ if not backup_id:
+ return jsonify({"success": False, "error": "Backup ID required"}), 400
+
+ restore_info = backup_manager.restore_backup(backup_id)
+
+ return jsonify({
+ 'success': True,
+ 'restore_info': restore_info
+ })
+
+ except Exception as e:
+ logger.error(f"Error restoring backup: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/delete', methods=['POST'])
+def delete_backup():
+ """Delete a backup"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ data = request.get_json() or {}
+ backup_id = data.get('backup_id')
+
+ if not backup_id:
+ return jsonify({"success": False, "error": "Backup ID required"}), 400
+
+ backup_manager.delete_backup(backup_id)
+
+ return jsonify({
+ 'success': True,
+ 'message': f'Backup {backup_id} deleted successfully'
+ })
+
+ except Exception as e:
+ logger.error(f"Error deleting backup: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/delete-database', methods=['POST'])
+def delete_database():
+ """Delete the current database (destructive operation)"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ deleted_databases = backup_manager.delete_database()
+
+ return jsonify({
+ 'success': True,
+ 'deleted_databases': deleted_databases,
+ 'message': 'Database deleted successfully'
+ })
+
+ except Exception as e:
+ logger.error(f"Error deleting database: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@backup_bp.route('/api/backup/next-scheduled', methods=['GET'])
+def next_scheduled_backup():
+ """Get the next scheduled backup time"""
+ username = get_user_for_request()
+ if not username:
+ return jsonify({"success": False, "error": "Authentication required"}), 401
+
+ try:
+ next_backup = backup_manager.get_next_scheduled_backup()
+
+ return jsonify({
+ 'success': True,
+ 'next_backup': next_backup
+ })
+
+ except Exception as e:
+ logger.error(f"Error getting next backup time: {e}")
+ return jsonify({"success": False, "error": str(e)}), 500
\ No newline at end of file
diff --git a/version.txt b/version.txt
index 121e098f..fc10bd48 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-8.2.7
+8.2.8