Mirror of https://github.com/plexguide/Huntarr.io.git, synced 2025-12-21 14:09:38 -06:00
Refactor logging configuration: disable the basic logging setup to prevent duplicate messages. Update frontend polling intervals for reduced load: cycle-countdown API sync from 10 seconds to 30 seconds, hourly cap refresh from 30 seconds to 2 minutes, and log polling from 2 seconds to 5 seconds.
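
For scale, a back-of-envelope sketch of the per-client request rates implied by the intervals above (values come straight from the commit message; real totals depend on how many views a browser has open):

# Requests per minute, before and after, from the stated polling intervals.
old = {"cycle API sync": 10, "hourly cap refresh": 30, "log polling": 2}   # seconds
new = {"cycle API sync": 30, "hourly cap refresh": 120, "log polling": 5}  # seconds
for name in old:
    print(f"{name}: {60 / old[name]:.1f} -> {60 / new[name]:.1f} requests/min")
# cycle API sync: 6.0 -> 2.0 requests/min
# hourly cap refresh: 2.0 -> 0.5 requests/min
# log polling: 30.0 -> 12.0 requests/min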
14 check_logs.sh Executable file
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+echo "🔍 Quick Log Spam Check"
+echo "======================="
+
+# Run the monitoring script once
+python3 monitor_logs.py
+
+echo ""
+echo "💡 Tips:"
+echo "   - Run 'python3 monitor_logs.py --continuous 30' for continuous monitoring"
+echo "   - Press Ctrl+C to stop continuous monitoring"
+echo "   - Logs are considered spam if they repeat more than 5 times"
+echo "   - Timestamps are suspicious if they appear more than 2 times"
@@ -79,11 +79,11 @@ window.CycleCountdown = (function() {
             clearInterval(refreshInterval);
         }
 
-        // Set up API sync every 10 seconds (not for display, just for accuracy)
+        // Set up API sync every 30 seconds (reduced from 10 seconds, not for display, just for accuracy)
         refreshInterval = setInterval(() => {
             // Only refresh if not already fetching
             if (!isFetchingData) {
-                console.log('[CycleCountdown] API sync (every 10s) to maintain accuracy...');
+                console.log('[CycleCountdown] API sync (every 30s) to maintain accuracy...');
                 fetchAllCycleData()
                     .then((data) => {
                         if (data && Object.keys(data).length > 0) {
@@ -95,9 +95,9 @@ window.CycleCountdown = (function() {
                         console.log('[CycleCountdown] API sync failed, timers continue with last known data');
                     });
             }
-        }, 10000); // API sync every 10 seconds
+        }, 30000); // API sync every 30 seconds (reduced from 10 seconds)
 
-        console.log('[CycleCountdown] API sync interval started (10s) - timers run independently at 1s');
+        console.log('[CycleCountdown] API sync interval started (30s) - timers run independently at 1s');
     }
 
     // Start the refresh cycle
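The design behind this hunk: the countdown ticks locally every second, and the API is only a periodic correction, so a slower sync cannot make the display stutter. A minimal sketch of the same pattern in Python (fetch_cycle_data is a hypothetical stand-in for the real fetchAllCycleData call):

import threading
import time

remaining = 300  # seconds left in the cycle, advanced by the local 1 s tick

def fetch_cycle_data():
    return 300  # pretend the server says 300 s remain

def resync(interval=30):
    # API sync every 30 s corrects drift without driving the display.
    global remaining
    while True:
        time.sleep(interval)
        remaining = fetch_cycle_data()

threading.Thread(target=resync, daemon=True).start()
for _ in range(5):  # local ticks keep running even if a sync is slow or fails
    time.sleep(1)
    remaining = max(0, remaining - 1)
    print(f"{remaining}s remaining")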
@@ -7,8 +7,8 @@ document.addEventListener('DOMContentLoaded', function() {
     // Initial load of hourly cap data
     loadHourlyCapData();
 
-    // Set up polling to refresh the hourly cap data every 30 seconds
-    setInterval(loadHourlyCapData, 30000);
+    // Set up polling to refresh the hourly cap data every 2 minutes (reduced from 30 seconds)
+    setInterval(loadHourlyCapData, 120000);
 });
 
 /**
@@ -301,10 +301,10 @@ window.LogsModule = {
     // Load initial logs
     this.loadLogsFromAPI(appType);
 
-    // Set up polling for new logs every 2 seconds
+    // Set up polling for new logs every 5 seconds (reduced from 2 seconds)
     this.logPollingInterval = setInterval(() => {
         this.loadLogsFromAPI(appType, true);
-    }, 2000);
+    }, 5000);
 
     // Update connection status
     if (this.elements.logConnectionStatus) {
4 main.py
@@ -34,8 +34,8 @@ class LocalTimeFormatter(logging.Formatter):
         super().__init__(*args, **kwargs)
         self.converter = time.localtime # Use local time instead of UTC
 
-# Set up logging with local time formatter
-logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+# Disable basic logging to prevent duplicates - we use custom loggers
+# logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
 
 # Apply local time converter to all existing handlers
 for handler in logging.root.handlers:
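The removed basicConfig() call is what produced the duplicates: it installs a handler on the root logger, and records from the app's named loggers are then emitted twice, once by their own handler and once via propagation to root. A minimal reproduction, independent of Huntarr's code:

import logging
import sys

logging.basicConfig(level=logging.INFO)               # installs a handler on the root logger
logger = logging.getLogger("huntarr")
logger.addHandler(logging.StreamHandler(sys.stdout))  # a custom handler, as the app's loggers have

logger.info("hello")    # printed twice: once by our handler, once by root's
logger.propagate = False
logger.info("hello")    # printed once: propagation to root is cut off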
192 monitor_logs.py Executable file
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+"""
+Huntarr Log Spam Monitor
+Monitors Docker logs for excessive spam messages and duplicate timestamps
+"""
+
+import subprocess
+import time
+import re
+from collections import defaultdict, Counter
+from datetime import datetime, timedelta
+import sys
+
+class LogSpamMonitor:
+    def __init__(self):
+        self.message_counts = Counter()
+        self.timestamp_counts = Counter()
+        self.recent_messages = []
+        self.spam_threshold = 5  # Messages repeated more than this are considered spam
+        self.time_window = 60  # Monitor last 60 seconds
+        self.duplicate_threshold = 2  # Same timestamp appearing more than this is suspicious
+
+    def extract_timestamp_and_message(self, log_line):
+        """Extract timestamp and clean message from log line"""
+        # Pattern to match: "2025-06-13 05:08:14 UTC - huntarr - LEVEL - message"
+        pattern = r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) UTC.*?- (.*)'
+        match = re.search(pattern, log_line)
+
+        if match:
+            timestamp_str = match.group(1)
+            message = match.group(2).strip()
+
+            # Clean up the message by removing variable parts
+            # Remove session IDs, IP addresses, etc.
+            message = re.sub(r'session_id: [a-f0-9]+', 'session_id: [REDACTED]', message)
+            message = re.sub(r'IP address: [\d\.]+', 'IP address: [REDACTED]', message)
+            message = re.sub(r'path \'[^\']+\'', 'path [REDACTED]', message)
+
+            return timestamp_str, message
+
+        return None, None
+
+    def analyze_logs(self, lines):
+        """Analyze log lines for spam and duplicates"""
+        current_time = datetime.now()
+        spam_detected = []
+        duplicate_timestamps = []
+
+        # Clear old data
+        self.message_counts.clear()
+        self.timestamp_counts.clear()
+
+        for line in lines:
+            timestamp_str, message = self.extract_timestamp_and_message(line)
+
+            if timestamp_str and message:
+                # Count message occurrences
+                self.message_counts[message] += 1
+
+                # Count timestamp occurrences (down to the second)
+                self.timestamp_counts[timestamp_str] += 1
+
+        # Detect spam messages
+        for message, count in self.message_counts.items():
+            if count > self.spam_threshold:
+                spam_detected.append({
+                    'message': message,
+                    'count': count,
+                    'type': 'repeated_message'
+                })
+
+        # Detect duplicate timestamps
+        for timestamp, count in self.timestamp_counts.items():
+            if count > self.duplicate_threshold:
+                duplicate_timestamps.append({
+                    'timestamp': timestamp,
+                    'count': count,
+                    'type': 'duplicate_timestamp'
+                })
+
+        return spam_detected, duplicate_timestamps
+
+    def get_recent_logs(self, tail_lines=100):
+        """Get recent logs from Docker container"""
+        try:
+            result = subprocess.run(
+                ['docker-compose', 'logs', 'huntarr', '--tail', str(tail_lines)],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+
+            if result.returncode == 0:
+                return result.stdout.strip().split('\n')
+            else:
+                print(f"Error getting logs: {result.stderr}")
+                return []
+
+        except subprocess.TimeoutExpired:
+            print("Timeout getting logs")
+            return []
+        except Exception as e:
+            print(f"Exception getting logs: {e}")
+            return []
+
+    def print_report(self, spam_detected, duplicate_timestamps):
+        """Print a formatted report of detected issues"""
+        print(f"\n{'='*80}")
+        print(f"LOG SPAM MONITOR REPORT - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+        print(f"{'='*80}")
+
+        if spam_detected:
+            print(f"\n🚨 SPAM MESSAGES DETECTED ({len(spam_detected)} types):")
+            print("-" * 60)
+            for spam in spam_detected:
+                print(f"  Count: {spam['count']:3d} | Message: {spam['message'][:100]}...")
+
+        if duplicate_timestamps:
+            print(f"\n⚠️ DUPLICATE TIMESTAMPS DETECTED ({len(duplicate_timestamps)} timestamps):")
+            print("-" * 60)
+            for dup in duplicate_timestamps:
+                print(f"  Count: {dup['count']:3d} | Timestamp: {dup['timestamp']}")
+
+        if not spam_detected and not duplicate_timestamps:
+            print("\n✅ NO SPAM OR DUPLICATE TIMESTAMPS DETECTED")
+            print("   Logs appear to be clean!")
+
+        print(f"\nThresholds: Spam > {self.spam_threshold} messages, Duplicates > {self.duplicate_threshold} timestamps")
+        print(f"{'='*80}\n")
+
+    def monitor_continuously(self, interval=30):
+        """Monitor logs continuously"""
+        print(f"🔍 Starting continuous log monitoring (checking every {interval} seconds)")
+        print(f"   Spam threshold: {self.spam_threshold} repeated messages")
+        print(f"   Duplicate threshold: {self.duplicate_threshold} same timestamps")
+        print("   Press Ctrl+C to stop\n")
+
+        try:
+            while True:
+                lines = self.get_recent_logs(tail_lines=200)
+                if lines:
+                    spam_detected, duplicate_timestamps = self.analyze_logs(lines)
+
+                    # Only print report if issues are detected
+                    if spam_detected or duplicate_timestamps:
+                        self.print_report(spam_detected, duplicate_timestamps)
+                    else:
+                        # Just print a brief status
+                        print(f"✅ {datetime.now().strftime('%H:%M:%S')} - Logs clean (checked {len(lines)} lines)")
+
+                time.sleep(interval)
+
+        except KeyboardInterrupt:
+            print("\n\n🛑 Monitoring stopped by user")
+        except Exception as e:
+            print(f"\n❌ Error during monitoring: {e}")
+
+    def single_check(self):
+        """Perform a single check of the logs"""
+        print("🔍 Performing single log spam check...")
+        lines = self.get_recent_logs(tail_lines=200)
+
+        if lines:
+            spam_detected, duplicate_timestamps = self.analyze_logs(lines)
+            self.print_report(spam_detected, duplicate_timestamps)
+
+            # Return True if issues were found
+            return len(spam_detected) > 0 or len(duplicate_timestamps) > 0
+        else:
+            print("❌ Could not retrieve logs")
+            return False
+
+def main():
+    monitor = LogSpamMonitor()
+
+    if len(sys.argv) > 1 and sys.argv[1] == '--continuous':
+        # Continuous monitoring mode
+        interval = 30
+        if len(sys.argv) > 2:
+            try:
+                interval = int(sys.argv[2])
+            except ValueError:
+                print("Invalid interval, using default 30 seconds")
+
+        monitor.monitor_continuously(interval)
+    else:
+        # Single check mode
+        issues_found = monitor.single_check()
+        sys.exit(1 if issues_found else 0)
+
+if __name__ == "__main__":
+    main()
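One subtlety in extract_timestamp_and_message above: the lazy .*?- stops at the first "- " after "UTC", so the captured "message" still carries the logger name and level. That is harmless for spam counting (those fields are constant across lines), but worth knowing when reading reports. A quick check with an illustrative log line:

import re

pattern = r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) UTC.*?- (.*)'
line = "2025-06-13 05:08:14 UTC - huntarr - INFO - Starting hunt cycle"  # sample line
m = re.search(pattern, line)
print(m.group(1))  # 2025-06-13 05:08:14
print(m.group(2))  # huntarr - INFO - Starting hunt cycle  (lazy match keeps name/level)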
@@ -12,44 +12,48 @@ class WebAddressFilter(logging.Filter):
         return True
 
 def configure_logging():
+    # Disable this logging configuration to prevent duplicates
+    # The main logging is handled by src/primary/utils/logger.py
+    pass
 
-    # Get timezone set in the environment (this will be updated when user changes the timezone in UI)
-    try:
-        # Create a custom formatter that includes timezone information
-        class TimezoneFormatter(logging.Formatter):
-            def __init__(self, *args, **kwargs):
-                super().__init__(*args, **kwargs)
-                self.converter = time.localtime # Use local time instead of UTC
-
-            def formatTime(self, record, datefmt=None):
-                ct = self.converter(record.created)
-                if datefmt:
-                    return time.strftime(datefmt, ct)
-                else:
-                    # Use local time without timezone suffix for consistency
-                    return time.strftime("%Y-%m-%d %H:%M:%S", ct)
-
-        # Configure the formatter for all handlers
-        formatter = TimezoneFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
-        # Reset the root logger and reconfigure with proper timezone handling
-        for handler in logging.root.handlers[:]:
-            logging.root.removeHandler(handler)
-
-        logging.basicConfig(level=logging.INFO)
-
-        # Apply the formatter to all handlers
-        for handler in logging.root.handlers:
-            handler.setFormatter(formatter)
-
-    except Exception as e:
-        # Fallback to basic logging if any issues
-        logging.basicConfig(level=logging.INFO,
-                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-        logging.error(f"Error setting up timezone-aware logging: {e}")
-
-    # Add filter to remove web interface URL logs
-    for handler in logging.root.handlers:
-        handler.addFilter(WebAddressFilter())
+    # try:
+    #     # Create a custom formatter that includes timezone information
+    #     class TimezoneFormatter(logging.Formatter):
+    #         def __init__(self, *args, **kwargs):
+    #             super().__init__(*args, **kwargs)
+    #             self.converter = time.localtime # Use local time instead of UTC
+    #
+    #         def formatTime(self, record, datefmt=None):
+    #             ct = self.converter(record.created)
+    #             if datefmt:
+    #                 return time.strftime(datefmt, ct)
+    #             else:
+    #                 # Use local time without timezone suffix for consistency
+    #                 return time.strftime("%Y-%m-%d %H:%M:%S", ct)
+    #
+    #     # Configure the formatter for all handlers
+    #     formatter = TimezoneFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    #
+    #     # Reset the root logger and reconfigure with proper timezone handling
+    #     for handler in logging.root.handlers[:]:
+    #         logging.root.removeHandler(handler)
+    #
+    #     logging.basicConfig(level=logging.INFO)
+    #
+    #     # Apply the formatter to all handlers
+    #     for handler in logging.root.handlers:
+    #         handler.setFormatter(formatter)
+    #
+    # except Exception as e:
+    #     # Fallback to basic logging if any issues
+    #     logging.basicConfig(level=logging.INFO,
+    #                         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    #     logging.error(f"Error setting up timezone-aware logging: {e}")
+    #
+    # # Add filter to remove web interface URL logs
+    # for handler in logging.root.handlers:
+    #     handler.addFilter(WebAddressFilter())
 
     logging.info("Logging is configured.")
@@ -31,13 +31,20 @@ def get_status():
     # Get strike statistics from database for all configured apps
     app_statistics = {}
 
+    # Only read statistics if Swaparr is enabled to avoid unnecessary database errors
+    if enabled and configured:
         try:
             db = get_database()
 
             # Get all configured instances to check for state data
             if configured:
                 instances = get_configured_instances()
                 for app_name, app_instances in instances.items():
+                    # Only process apps that have Swaparr enabled for at least one instance
+                    swaparr_enabled_for_app = any(instance.get("swaparr_enabled", False) for instance in app_instances)
+
+                    if not swaparr_enabled_for_app:
+                        continue  # Skip apps that don't have Swaparr enabled
+
                     app_stats = {"error": None}
 
                     try:
@@ -91,7 +98,8 @@ def get_status():
                 {
                     "instance_name": instance.get("instance_name", "Unknown"),
                     "api_url": instance.get("api_url", "Not configured"),
-                    "enabled": instance.get("enabled", False)
+                    "enabled": instance.get("enabled", False),
+                    "swaparr_enabled": instance.get("swaparr_enabled", False)
                 }
                 for instance in app_instances
             ]
@@ -214,7 +214,7 @@ def verify_user(username: str, password: str, otp_code: str = None) -> Tuple[boo
                 valid_code = totp.verify(otp_code)
                 logger.debug(f"OTP code validation result: {valid_code}")
                 if valid_code:
-                    logger.info(f"User '{username}' authenticated successfully with 2FA.")
+                    logger.debug(f"User '{username}' authenticated successfully with 2FA.")
                     return True, False
                 else:
                     logger.warning(f"Login attempt failed for user '{username}': Invalid 2FA code.")
@@ -226,7 +226,7 @@ def verify_user(username: str, password: str, otp_code: str = None) -> Tuple[boo
                     return False, True
             else:
                 # 2FA not enabled, password is correct
-                logger.info(f"User '{username}' authenticated successfully (no 2FA).")
+                logger.debug(f"User '{username}' authenticated successfully (no 2FA).")
                 return True, False
         else:
             logger.warning(f"Login attempt failed for user '{username}': Invalid password.")
@@ -771,7 +771,7 @@ def verify_plex_token(token: str) -> Optional[Dict[str, Any]]:
 
         if response.status_code == 200:
             user_data = response.json()
-            logger.info(f"Plex token verified for user: {user_data.get('username', 'unknown')}")
+            logger.debug(f"Plex token verified for user: {user_data.get('username', 'unknown')}")
             return user_data
         elif response.status_code == 401:
             logger.warning("Invalid Plex token")
@@ -665,7 +665,7 @@ def hourly_cap_scheduler_loop():
     # Initial check in case we're starting right at the top of an hour
     current_time = datetime.datetime.now()
     if current_time.minute == 0:
-        logger.info(f"Initial hourly reset triggered at {current_time.hour}:00")
+        logger.debug(f"Initial hourly reset triggered at {current_time.hour}:00")
         reset_hourly_caps()
 
     # Main monitoring loop
@@ -681,10 +681,10 @@ def hourly_cap_scheduler_loop():
         # Check if it's the top of the hour (00 minute mark)
         current_time = datetime.datetime.now()
         if current_time.minute == 0:
-            logger.info(f"Hourly reset triggered at {current_time.hour}:00")
+            logger.debug(f"Hourly reset triggered at {current_time.hour}:00")
             success = reset_hourly_caps()
             if success:
-                logger.info(f"Successfully reset hourly API caps at {current_time.hour}:00")
+                logger.debug(f"Successfully reset hourly API caps at {current_time.hour}:00")
             else:
                 logger.error(f"Failed to reset hourly API caps at {current_time.hour}:00")
@@ -44,11 +44,11 @@ def check_and_reset_caps():
 
     # Only reset at the top of the hour (minute 0)
     if current_time.minute == 0:
-        logger.info(f"Hourly reset triggered at {current_time.hour}:00")
+        logger.debug(f"Hourly reset triggered at {current_time.hour}:00")
         try:
             success = reset_hourly_caps()
             if success:
-                logger.info(f"Successfully reset hourly API caps at {current_time.hour}:00")
+                logger.debug(f"Successfully reset hourly API caps at {current_time.hour}:00")
             else:
                 logger.error(f"Failed to reset hourly API caps at {current_time.hour}:00")
         except Exception as e:
@@ -97,7 +97,7 @@ def login_route():
             session[SESSION_COOKIE_NAME] = session_token # Store token in Flask session immediately
             response = jsonify({"success": True, "redirect": "./"}) # Add redirect URL
             response.set_cookie(SESSION_COOKIE_NAME, session_token, httponly=True, samesite='Lax', path='/') # Add path
-            logger.info(f"User '{username}' logged in successfully.")
+            logger.debug(f"User '{username}' logged in successfully.")
             return response
         elif needs_2fa:
             # Authentication failed *because* 2FA was required (or code was invalid)
@@ -208,7 +208,7 @@ def setup():
             logger.error(f"Error saving proxy auth bypass setting: {e}", exc_info=True)
 
     # Automatically log in the user after setup
-    logger.info(f"User '{username}' created successfully during setup. Creating session.")
+    logger.debug(f"User '{username}' created successfully during setup. Creating session.")
     session_token = create_session(username)
     # Explicitly set username in Flask session - might not be needed if using token correctly
     # session['username'] = username
@@ -135,7 +135,7 @@ def check_hourly_reset():
 
     # Only reset at the top of the hour (00 minute mark)
     if current_time.minute == 0:
-        logger.info(f"Hour changed to {current_hour}:00, resetting hourly API caps")
+        logger.debug(f"Hour changed to {current_hour}:00, resetting hourly API caps")
         reset_hourly_caps()
         last_hour_checked = current_hour
@@ -462,7 +462,7 @@ def reset_hourly_caps() -> bool:
     try:
         db = get_database()
         db.reset_hourly_caps()
-        logger.info("Reset all hourly API caps")
+        logger.debug("Reset all hourly API caps")
         return True
     except Exception as e:
         logger.error(f"Error resetting hourly caps: {e}")
@@ -1075,19 +1075,12 @@ class HuntarrDatabase:
 
     def get_swaparr_removed_items(self, app_name: str) -> Dict[str, Any]:
         """Get removed items data for a specific Swaparr app"""
-        with sqlite3.connect(self.db_path) as conn:
-            cursor = conn.execute('SELECT removed_items FROM swaparr_state WHERE app_name = ?', (app_name,))
-            row = cursor.fetchone()
-            return json.loads(row[0]) if row and row[0] else {}
+        data = self.get_swaparr_state_data(app_name, "removed_items")
+        return data if data is not None else {}
 
     def set_swaparr_removed_items(self, app_name: str, removed_items: Dict[str, Any]):
         """Set removed items data for a specific Swaparr app"""
-        with sqlite3.connect(self.db_path) as conn:
-            conn.execute('''
-                INSERT OR REPLACE INTO swaparr_state (app_name, removed_items, updated_at)
-                VALUES (?, ?, CURRENT_TIMESTAMP)
-            ''', (app_name, json.dumps(removed_items)))
-            conn.commit()
+        self.set_swaparr_state_data(app_name, "removed_items", removed_items)
 
     # Reset Request Management Methods (replaces file-based reset system)
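The get_swaparr_state_data / set_swaparr_state_data helpers this hunk delegates to are not shown in the diff. A hypothetical sketch of what such generic accessors could look like, assuming the same swaparr_state table with one JSON column per state kind (the real implementation may differ):

import json
import sqlite3
from typing import Any, Optional

# Hypothetical generic accessors; the column-per-kind schema is inferred from the removed code.
def get_swaparr_state_data(self, app_name: str, kind: str) -> Optional[Any]:
    with sqlite3.connect(self.db_path) as conn:
        cursor = conn.execute(
            f'SELECT {kind} FROM swaparr_state WHERE app_name = ?', (app_name,))
        row = cursor.fetchone()
        return json.loads(row[0]) if row and row[0] else None

def set_swaparr_state_data(self, app_name: str, kind: str, data: Any):
    with sqlite3.connect(self.db_path) as conn:
        conn.execute(
            f'INSERT OR REPLACE INTO swaparr_state (app_name, {kind}, updated_at) '
            f'VALUES (?, ?, CURRENT_TIMESTAMP)',
            (app_name, json.dumps(data)))
        conn.commit()
# kind is an internal constant ("removed_items"), so the f-string column name is
# not exposed to user input.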
@@ -101,6 +101,9 @@ def setup_main_logger():
     current_logger.handlers.clear()
     current_logger.setLevel(use_log_level)
 
+    # Prevent propagation to root logger to avoid duplicate messages
+    current_logger.propagate = False
+
     # Create console handler
     console_handler = logging.StreamHandler(sys.stdout)
     console_handler.setLevel(use_log_level)
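Taken together with the main.py change, this is the pattern that keeps each message single: clear any stale handlers, attach exactly one, and cut propagation so root handlers never re-emit the record. A self-contained sketch of that setup (names mirror the hunk; the body is illustrative):

import logging
import sys

def setup_main_logger(name="huntarr", use_log_level=logging.INFO):
    current_logger = logging.getLogger(name)
    current_logger.handlers.clear()   # safe to call repeatedly: no stacked handlers
    current_logger.setLevel(use_log_level)
    current_logger.propagate = False  # root's handlers never see these records

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(use_log_level)
    current_logger.addHandler(console_handler)
    return current_logger

logger = setup_main_logger()
logger.info("one line, even if root logging is configured elsewhere")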