Admin9705
2025-04-12 01:12:52 -04:00
parent 5d8ae30f4d
commit 83fb14efa5
19 changed files with 1864 additions and 287 deletions

docker-compose.yml Normal file

@@ -0,0 +1,21 @@
version: '3'
services:
  huntarr:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: huntarr
    ports:
      - "9705:9705"
    volumes:
      - huntarr-config:/config
      - /tmp/huntarr-logs:/tmp/huntarr-logs
      - /tmp/huntarr-state:/tmp/huntarr-state
    environment:
      - TZ=America/New_York
    restart: unless-stopped

volumes:
  huntarr-config:
    name: huntarr-config


@@ -57,43 +57,54 @@ def arr_request(endpoint: str, method: str = "GET", data: Dict = None) -> Option
logger.error(f"API request error: {e}")
return None
def check_connection() -> bool:
def check_connection(app_type: str = None) -> bool:
"""
Check if we can connect to the Arr API.
Returns True if connection is successful, False otherwise.
Args:
app_type: Optional app type to check connection for (sonarr, radarr, etc.).
If None, uses the global APP_TYPE.
"""
# Determine which app type to use
current_app_type = app_type or APP_TYPE
# Get API credentials for the specified app type
from primary import keys_manager
api_url, api_key = keys_manager.get_api_keys(current_app_type)
# First explicitly check if API URL and Key are configured
if not API_URL:
logger.error("API URL is not configured in settings. Please set it up in the Settings page.")
if not api_url:
logger.error(f"API URL is not configured for {current_app_type} in settings. Please set it up in the Settings page.")
return False
if not API_KEY:
logger.error("API Key is not configured in settings. Please set it up in the Settings page.")
if not api_key:
logger.error(f"API Key is not configured for {current_app_type} in settings. Please set it up in the Settings page.")
return False
# Log what we're attempting to connect to
logger.debug(f"Attempting to connect to {APP_TYPE.title()} at {API_URL}")
logger.debug(f"Attempting to connect to {current_app_type.title()} at {api_url}")
# Try to access the system/status endpoint which should be available on all Arr applications
try:
endpoint = "system/status"
# Determine the API version based on app type
if APP_TYPE == "sonarr":
if current_app_type == "sonarr":
api_base = "api/v3"
elif APP_TYPE == "radarr":
elif current_app_type == "radarr":
api_base = "api/v3"
elif APP_TYPE == "lidarr":
elif current_app_type == "lidarr":
api_base = "api/v1"
elif APP_TYPE == "readarr":
elif current_app_type == "readarr":
api_base = "api/v1"
else:
# Default to v3 for unknown app types
api_base = "api/v3"
url = f"{API_URL}/{api_base}/{endpoint}"
url = f"{api_url}/{api_base}/{endpoint}"
headers = {
"X-Api-Key": API_KEY,
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
@@ -101,14 +112,14 @@ def check_connection() -> bool:
response = session.get(url, headers=headers, timeout=API_TIMEOUT)
if response.status_code == 401:
logger.error(f"Connection test failed: 401 Client Error: Unauthorized - Invalid API key for {APP_TYPE.title()}")
logger.error(f"Connection test failed: 401 Client Error: Unauthorized - Invalid API key for {current_app_type.title()}")
return False
response.raise_for_status()
logger.info(f"Connection to {APP_TYPE.title()} at {API_URL} successful")
logger.info(f"Connection to {current_app_type.title()} at {api_url} successful")
return True
except requests.exceptions.RequestException as e:
logger.error(f"Connection test failed: {e}")
logger.error(f"Connection test failed for {current_app_type}: {e}")
return False
def wait_for_command(command_id: int):
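
A minimal usage sketch of the new per-app signature (hypothetical driver code, not part of this commit): each configured app can now be probed independently instead of relying on the global APP_TYPE.

from primary.api import check_connection

for app in ("sonarr", "radarr", "lidarr", "readarr"):
    if check_connection(app):
        print(f"{app}: connection OK")
    else:
        print(f"{app}: connection failed - check its API URL and key in Settings")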


@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
Missing Albums Processing for Lidarr
Handles searching for missing albums in Lidarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger, debug_log
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("lidarr")

def process_missing_albums(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process albums that are missing from the library.

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("lidarr")

    # Get the current value directly at the start of processing
    HUNT_MISSING_ALBUMS = settings_manager.get_setting("huntarr", "hunt_missing_albums", 1)
    RANDOM_MISSING = settings_manager.get_setting("advanced", "random_missing", True)

    # Get app-specific state file
    PROCESSED_MISSING_FILE = get_state_file_path("lidarr", "processed_missing")

    logger.info("=== Checking for Missing Albums ===")

    # Skip if HUNT_MISSING_ALBUMS is set to 0
    if HUNT_MISSING_ALBUMS <= 0:
        logger.info("HUNT_MISSING_ALBUMS is set to 0, skipping missing albums")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting missing albums. Aborting...")
        return False

    # Placeholder for API implementation - would check for missing albums here
    logger.info("Lidarr missing albums functionality not yet implemented")
    return False  # No processing done in placeholder implementation
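
One way a caller might wire the restart_cycle_flag callable — a sketch assuming a threading.Event is shared with whatever reacts to settings changes (the Event here is hypothetical; the commit's main loop uses a per-app boolean dict instead):

import threading
from primary.apps.lidarr.missing import process_missing_albums

restart_event = threading.Event()
# The processor polls the callable between steps, so setting the event from
# another thread (e.g. a settings-change handler) aborts the cycle promptly.
did_work = process_missing_albums(restart_cycle_flag=restart_event.is_set)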


@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
Quality Upgrade Processing for Lidarr
Handles searching for tracks/albums that need quality upgrades in Lidarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("lidarr")

def get_current_upgrade_limit():
    """Get the current HUNT_UPGRADE_TRACKS value directly from config"""
    return settings_manager.get_setting("huntarr", "hunt_upgrade_tracks", 0)

def process_cutoff_upgrades(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process tracks that need quality upgrades (cutoff unmet).

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("lidarr")

    # Get the current value directly at the start of processing
    HUNT_UPGRADE_TRACKS = get_current_upgrade_limit()
    RANDOM_UPGRADES = settings_manager.get_setting("advanced", "random_upgrades", True)

    # Get app-specific state file
    PROCESSED_UPGRADE_FILE = get_state_file_path("lidarr", "processed_upgrades")

    logger.info("=== Checking for Quality Upgrades (Cutoff Unmet) ===")

    # Skip if HUNT_UPGRADE_TRACKS is set to 0
    if HUNT_UPGRADE_TRACKS <= 0:
        logger.info("HUNT_UPGRADE_TRACKS is set to 0, skipping quality upgrades")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting quality upgrades. Aborting...")
        return False

    # Placeholder for API implementation - would check for quality upgrades here
    logger.info("Lidarr quality upgrades functionality not yet implemented")
    return False  # No processing done in placeholder implementation
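
The hunt limits above follow a (section, key, default) lookup convention in which 0 disables a feature and any positive value caps work per cycle; a hedged sketch of that contract:

from primary import settings_manager

# 0 (or less) disables the hunt; a positive value is the per-cycle cap.
limit = settings_manager.get_setting("huntarr", "hunt_upgrade_tracks", 0)
if limit <= 0:
    print("quality upgrades disabled")
else:
    print(f"will process at most {limit} items this cycle")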


@@ -0,0 +1,8 @@
"""
Radarr app module for Huntarr
Contains functionality for missing movies and quality upgrades in Radarr
"""
# Module exports
from primary.apps.radarr.missing import process_missing_movies
from primary.apps.radarr.upgrade import process_cutoff_upgrades
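
These re-exports let callers pull both processors from the package root rather than the submodules; for example:

# Equivalent, thanks to the re-exports above:
from primary.apps.radarr import process_missing_movies, process_cutoff_upgrades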


@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
Missing Movies Processing for Radarr
Handles searching for missing movies in Radarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger, debug_log
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("radarr")

def process_missing_movies(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process movies that are missing from the library.

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("radarr")

    # Get the current value directly at the start of processing
    HUNT_MISSING_MOVIES = settings_manager.get_setting("huntarr", "hunt_missing_movies", 1)
    RANDOM_MISSING = settings_manager.get_setting("advanced", "random_missing", True)

    # Get app-specific state file
    PROCESSED_MISSING_FILE = get_state_file_path("radarr", "processed_missing")

    logger.info("=== Checking for Missing Movies ===")

    # Skip if HUNT_MISSING_MOVIES is set to 0
    if HUNT_MISSING_MOVIES <= 0:
        logger.info("HUNT_MISSING_MOVIES is set to 0, skipping missing movies")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting missing movies. Aborting...")
        return False

    # Placeholder for API implementation - would check for missing movies here
    logger.info("Radarr missing movies functionality not yet implemented")
    return False  # No processing done in placeholder implementation
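
A sketch of the processed-ID round trip these imports provide, assuming the state helpers accept the string paths returned by get_state_file_path, as the processors above do (movie_id is a hypothetical example value):

from primary.state import get_state_file_path, load_processed_ids, save_processed_id

state_file = get_state_file_path("radarr", "processed_missing")
movie_id = 123  # hypothetical Radarr movie ID
if movie_id not in load_processed_ids(state_file):
    save_processed_id(state_file, movie_id)  # remembered across cycles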


@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
Quality Upgrade Processing for Radarr
Handles searching for movies that need quality upgrades in Radarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("radarr")

def get_current_upgrade_limit():
    """Get the current HUNT_UPGRADE_MOVIES value directly from config"""
    return settings_manager.get_setting("huntarr", "hunt_upgrade_movies", 0)

def process_cutoff_upgrades(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process movies that need quality upgrades (cutoff unmet).

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("radarr")

    # Get the current value directly at the start of processing
    HUNT_UPGRADE_MOVIES = get_current_upgrade_limit()
    RANDOM_UPGRADES = settings_manager.get_setting("advanced", "random_upgrades", True)

    # Get app-specific state file
    PROCESSED_UPGRADE_FILE = get_state_file_path("radarr", "processed_upgrades")

    logger.info("=== Checking for Quality Upgrades (Cutoff Unmet) ===")

    # Skip if HUNT_UPGRADE_MOVIES is set to 0
    if HUNT_UPGRADE_MOVIES <= 0:
        logger.info("HUNT_UPGRADE_MOVIES is set to 0, skipping quality upgrades")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting quality upgrades. Aborting...")
        return False

    # Placeholder for API implementation - would check for quality upgrades here
    logger.info("Radarr quality upgrades functionality not yet implemented")
    return False  # No processing done in placeholder implementation


@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
Missing Books Processing for Readarr
Handles searching for missing books in Readarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger, debug_log
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("readarr")

def process_missing_books(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process books that are missing from the library.

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("readarr")

    # Get the current value directly at the start of processing
    HUNT_MISSING_BOOKS = settings_manager.get_setting("huntarr", "hunt_missing_books", 1)
    RANDOM_MISSING = settings_manager.get_setting("advanced", "random_missing", True)

    # Get app-specific state file
    PROCESSED_MISSING_FILE = get_state_file_path("readarr", "processed_missing")

    logger.info("=== Checking for Missing Books ===")

    # Skip if HUNT_MISSING_BOOKS is set to 0
    if HUNT_MISSING_BOOKS <= 0:
        logger.info("HUNT_MISSING_BOOKS is set to 0, skipping missing books")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting missing books. Aborting...")
        return False

    # Placeholder for API implementation - would check for missing books here
    logger.info("Readarr missing books functionality not yet implemented")
    return False  # No processing done in placeholder implementation


@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
Quality Upgrade Processing for Readarr
Handles searching for books that need quality upgrades in Readarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger
from primary.config import MONITORED_ONLY
from primary import settings_manager
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("readarr")

def get_current_upgrade_limit():
    """Get the current HUNT_UPGRADE_BOOKS value directly from config"""
    return settings_manager.get_setting("huntarr", "hunt_upgrade_books", 0)

def process_cutoff_upgrades(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process books that need quality upgrades (cutoff unmet).

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("readarr")

    # Get the current value directly at the start of processing
    HUNT_UPGRADE_BOOKS = get_current_upgrade_limit()
    RANDOM_UPGRADES = settings_manager.get_setting("advanced", "random_upgrades", True)

    # Get app-specific state file
    PROCESSED_UPGRADE_FILE = get_state_file_path("readarr", "processed_upgrades")

    logger.info("=== Checking for Quality Upgrades (Cutoff Unmet) ===")

    # Skip if HUNT_UPGRADE_BOOKS is set to 0
    if HUNT_UPGRADE_BOOKS <= 0:
        logger.info("HUNT_UPGRADE_BOOKS is set to 0, skipping quality upgrades")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting quality upgrades. Aborting...")
        return False

    # Placeholder for API implementation - would check for quality upgrades here
    logger.info("Readarr quality upgrades functionality not yet implemented")
    return False  # No processing done in placeholder implementation


@@ -0,0 +1,248 @@
#!/usr/bin/env python3
"""
Missing Episode Processing for Sonarr
Handles searching for missing episodes in Sonarr
"""

import random
import time
import datetime
import os
import json
from typing import List, Callable, Dict, Optional

from primary.utils.logger import get_logger, debug_log
from primary.config import (
    MONITORED_ONLY,
    SKIP_FUTURE_EPISODES,
    SKIP_SERIES_REFRESH
)
from primary import settings_manager
from primary.api import (
    get_episodes_for_series,
    refresh_series,
    episode_search_episodes,
    get_series_with_missing_episodes,
    arr_request,
    get_missing_episodes
)
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("sonarr")

def get_missing_total_pages(pageSize: int = 200) -> int:
    """
    Calculates the total number of pages for missing episodes.
    Returns the number of pages, or 0 if no missing episodes.
    Returns -1 if there was an API error.
    """
    response = get_missing_episodes(pageSize=1)  # Get just 1 to get total count
    if not response:
        logger.error("Failed to get missing episodes data from API")
        return -1
    if "totalRecords" not in response:
        logger.error("Missing totalRecords in API response")
        return -1
    total_records = response.get("totalRecords", 0)
    if not isinstance(total_records, int) or total_records < 1:
        logger.info("No missing episodes found")
        return 0
    # Calculate total pages based on pageSize
    total_pages = (total_records + pageSize - 1) // pageSize
    logger.debug(f"Total missing episodes: {total_records}, pages: {total_pages}")
    return max(total_pages, 1)
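
# Worked example of the ceiling division above (counts are hypothetical):
# 1,234 missing records at pageSize=200 give (1234 + 200 - 1) // 200 == 7
# pages, with the seventh page only partially full.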
def get_missing(page: int) -> Optional[Dict]:
    """Get a page of missing episodes."""
    # NOTE: the page argument is not yet forwarded to get_missing_episodes,
    # so every call currently fetches the same first page of up to 200 records.
    return get_missing_episodes(pageSize=200)
def process_missing_episodes(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process episodes that are missing from the library.

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("sonarr")

    # Get the current values directly at the start of processing
    HUNT_MISSING_SHOWS = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
    RANDOM_MISSING = settings_manager.get_setting("advanced", "random_missing", True)

    # Get app-specific state file
    PROCESSED_MISSING_FILE = get_state_file_path("sonarr", "processed_missing")

    logger.info("=== Checking for Missing Episodes ===")

    # Skip if HUNT_MISSING_SHOWS is set to 0
    if HUNT_MISSING_SHOWS <= 0:
        logger.info("HUNT_MISSING_SHOWS is set to 0, skipping missing episodes")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting missing episodes. Aborting...")
        return False

    total_pages = get_missing_total_pages()

    # If we got an error (-1) from the API request, return early
    if total_pages < 0:
        logger.error("Failed to get missing data due to API error. Skipping this cycle.")
        return False
    if total_pages == 0:
        logger.info("No missing episodes found.")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal after getting total pages. Aborting...")
        return False

    logger.info(f"Found {total_pages} total pages of missing episodes.")
    processed_missing_ids = load_processed_ids(PROCESSED_MISSING_FILE)
    episodes_processed = 0
    processing_done = False

    # Use RANDOM_MISSING setting
    should_use_random = RANDOM_MISSING
    logger.info(f"Using {'random' if should_use_random else 'sequential'} selection for missing episodes (RANDOM_MISSING={should_use_random})")

    # Initialize page variable for both modes
    page = 1
    while True:
        # Check for restart signal at the beginning of each page processing
        if restart_cycle_flag():
            logger.info("🔄 Received restart signal at start of page loop. Aborting...")
            break

        # Check again to make sure we're using the current limit.
        # This ensures that if settings changed during processing, we use the new value.
        current_limit = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
        if episodes_processed >= current_limit:
            logger.info(f"Reached HUNT_MISSING_SHOWS={current_limit} for this cycle.")
            break

        # If random selection is enabled, pick a random page each iteration
        if should_use_random and total_pages > 1:
            page = random.randint(1, total_pages)
        # If sequential and we've reached the end, we're done
        elif not should_use_random and page > total_pages:
            break

        logger.info(f"Retrieving missing episodes (page={page} of {total_pages})...")
        missing_data = get_missing(page)

        # Check for restart signal after retrieving page
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal after retrieving page {page}. Aborting...")
            break

        if not missing_data or "records" not in missing_data:
            logger.error(f"ERROR: Unable to retrieve missing data from Sonarr on page {page}.")
            # In sequential mode, try the next page
            if not should_use_random:
                page += 1
                continue
            else:
                break

        episodes = missing_data["records"]
        total_eps = len(episodes)
        logger.info(f"Found {total_eps} episodes on page {page} that are missing.")

        # Randomize or keep sequential indices within the page
        indices = list(range(total_eps))
        if should_use_random:
            random.shuffle(indices)

        # Check for restart signal before processing episodes
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal before processing episodes on page {page}. Aborting...")
            break

        for idx in indices:
            # Check for restart signal before each episode
            if restart_cycle_flag():
                logger.info("🔄 Received restart signal during episode processing. Aborting...")
                break

            # Check again for the current limit in case it was changed during processing
            current_limit = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
            if episodes_processed >= current_limit:
                break

            ep_obj = episodes[idx]
            episode_id = ep_obj.get("id")
            if not episode_id or episode_id in processed_missing_ids:
                continue

            series_id = ep_obj.get("seriesId")
            season_num = ep_obj.get("seasonNumber")
            ep_num = ep_obj.get("episodeNumber")
            ep_title = ep_obj.get("title", "Unknown Episode Title")
            series_title = ep_obj.get("seriesTitle", None)
            if not series_title:
                # Fallback: request the series record for its title
                series_data = arr_request(f"series/{series_id}", method="GET")
                if series_data:
                    series_title = series_data.get("title", "Unknown Series")
                else:
                    series_title = "Unknown Series"

            logger.info(f"Processing missing episode for \"{series_title}\" - S{season_num}E{ep_num} - \"{ep_title}\" (Episode ID: {episode_id})")

            # Search for the episode (missing)
            logger.info(" - Searching for missing episode...")
            search_res = episode_search_episodes([episode_id])
            if search_res:
                logger.info("Search command completed successfully.")
                # Mark processed
                save_processed_id(PROCESSED_MISSING_FILE, episode_id)
                episodes_processed += 1
                processing_done = True

                # Log with the current limit, not the initial one
                current_limit = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
                logger.info(f"Processed {episodes_processed}/{current_limit} missing episodes this cycle.")
            else:
                logger.warning(f"WARNING: Search command failed for episode ID {episode_id}.")
                continue

            # Check for restart signal after processing an episode
            if restart_cycle_flag():
                logger.info(f"🔄 Received restart signal after processing episode {ep_title}. Aborting...")
                break

        # Move to the next page if using sequential mode.
        # In random mode, we just handle one random page this iteration,
        # then check whether we've processed enough episodes or should continue to another random page.
        if not should_use_random:
            page += 1

        # Check for restart signal after processing a page
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal after processing page {page}. Aborting...")
            break

    # Log with the current limit, not the initial one
    current_limit = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
    logger.info(f"Completed processing {episodes_processed} missing episodes for this cycle.")
    truncate_processed_list(PROCESSED_MISSING_FILE)
    return processing_done
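
A note on the loop above, with a minimal sketch (values hypothetical): the hunt limit is re-read from settings on every iteration rather than cached, so a limit lowered in the web UI takes effect mid-cycle.

from primary import settings_manager

episodes_processed = 5  # hypothetical progress so far
# A UI change from 10 down to 5 stops work now, not after the cycle ends.
if episodes_processed >= settings_manager.get_setting("huntarr", "hunt_missing_shows", 1):
    print("limit reached for this cycle")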


@@ -0,0 +1,273 @@
#!/usr/bin/env python3
"""
Quality Upgrade Processing for Sonarr
Handles searching for episodes that need quality upgrades in Sonarr
"""

import random
import time
import datetime
import importlib
from typing import Callable

from primary.utils.logger import get_logger
from primary.config import (
    MONITORED_ONLY,
    SKIP_FUTURE_EPISODES,
    SKIP_SERIES_REFRESH
)
from primary import settings_manager
from primary.api import get_cutoff_unmet, get_cutoff_unmet_total_pages, refresh_series, episode_search_episodes, arr_request
from primary.state import load_processed_ids, save_processed_id, truncate_processed_list, get_state_file_path

# Get app-specific logger
logger = get_logger("sonarr")

def get_current_upgrade_limit():
    """Get the current HUNT_UPGRADE_EPISODES value directly from config"""
    return settings_manager.get_setting("huntarr", "hunt_upgrade_episodes", 0)

def process_cutoff_upgrades(restart_cycle_flag: Callable[[], bool] = lambda: False) -> bool:
    """
    Process episodes that need quality upgrades (cutoff unmet).

    Args:
        restart_cycle_flag: Function that returns whether to restart the cycle

    Returns:
        True if any processing was done, False otherwise
    """
    # Reload settings to ensure the latest values are used
    from primary.config import refresh_settings
    refresh_settings("sonarr")

    # Get the current values directly at the start of processing
    HUNT_UPGRADE_EPISODES = get_current_upgrade_limit()
    RANDOM_UPGRADES = settings_manager.get_setting("advanced", "random_upgrades", True)

    # Get app-specific state file
    PROCESSED_UPGRADE_FILE = get_state_file_path("sonarr", "processed_upgrades")

    logger.info("=== Checking for Quality Upgrades (Cutoff Unmet) ===")

    # Skip if HUNT_UPGRADE_EPISODES is set to 0
    if HUNT_UPGRADE_EPISODES <= 0:
        logger.info("HUNT_UPGRADE_EPISODES is set to 0, skipping quality upgrades")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal before starting quality upgrades. Aborting...")
        return False

    total_pages = get_cutoff_unmet_total_pages()

    # If we got an error (-1) from the API request, return early
    if total_pages < 0:
        logger.error("Failed to get cutoff unmet data due to API error. Skipping this cycle.")
        return False
    if total_pages == 0:
        logger.info("No episodes found that need quality upgrades.")
        return False

    # Check for restart signal
    if restart_cycle_flag():
        logger.info("🔄 Received restart signal after getting total pages. Aborting...")
        return False

    logger.info(f"Found {total_pages} total pages of episodes that need quality upgrades.")
    processed_upgrade_ids = load_processed_ids(PROCESSED_UPGRADE_FILE)
    episodes_processed = 0
    processing_done = False

    # Get current date for future episode filtering
    current_date = datetime.datetime.now().date()

    # Use RANDOM_UPGRADES setting
    should_use_random = RANDOM_UPGRADES
    logger.info(f"Using {'random' if should_use_random else 'sequential'} selection for quality upgrades (RANDOM_UPGRADES={should_use_random})")

    # Initialize page variable for both modes
    page = 1
    while True:
        # Check for restart signal at the beginning of each page processing
        if restart_cycle_flag():
            logger.info("🔄 Received restart signal at start of page loop. Aborting...")
            break

        # Check again to make sure we're using the current limit.
        # This ensures that if settings changed during processing, we use the new value.
        current_limit = get_current_upgrade_limit()
        if episodes_processed >= current_limit:
            logger.info(f"Reached HUNT_UPGRADE_EPISODES={current_limit} for this cycle.")
            break

        # If random selection is enabled, pick a random page each iteration
        if should_use_random and total_pages > 1:
            page = random.randint(1, total_pages)
        # If sequential and we've reached the end, we're done
        elif not should_use_random and page > total_pages:
            break

        logger.info(f"Retrieving cutoff-unmet episodes (page={page} of {total_pages})...")
        cutoff_data = get_cutoff_unmet(page)

        # Check for restart signal after retrieving page
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal after retrieving page {page}. Aborting...")
            break

        if not cutoff_data or "records" not in cutoff_data:
            logger.error(f"ERROR: Unable to retrieve cutoff-unmet data from Sonarr on page {page}.")
            # In sequential mode, try the next page
            if not should_use_random:
                page += 1
                continue
            else:
                break

        episodes = cutoff_data["records"]
        total_eps = len(episodes)
        logger.info(f"Found {total_eps} episodes on page {page} that need quality upgrades.")

        # Randomize or keep sequential indices within the page
        indices = list(range(total_eps))
        if should_use_random:
            random.shuffle(indices)

        # Check for restart signal before processing episodes
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal before processing episodes on page {page}. Aborting...")
            break

        for idx in indices:
            # Check for restart signal before each episode
            if restart_cycle_flag():
                logger.info("🔄 Received restart signal during episode processing. Aborting...")
                break

            # Check again for the current limit in case it was changed during processing
            current_limit = get_current_upgrade_limit()
            if episodes_processed >= current_limit:
                break

            ep_obj = episodes[idx]
            episode_id = ep_obj.get("id")
            if not episode_id or episode_id in processed_upgrade_ids:
                continue

            series_id = ep_obj.get("seriesId")
            season_num = ep_obj.get("seasonNumber")
            ep_num = ep_obj.get("episodeNumber")
            ep_title = ep_obj.get("title", "Unknown Episode Title")
            series_title = ep_obj.get("seriesTitle", None)
            if not series_title:
                # Fallback: request the series record for its title
                series_data = arr_request(f"series/{series_id}", method="GET")
                if series_data:
                    series_title = series_data.get("title", "Unknown Series")
                else:
                    series_title = "Unknown Series"

            # Skip future episodes if SKIP_FUTURE_EPISODES is enabled
            if SKIP_FUTURE_EPISODES:
                air_date_str = ep_obj.get("airDateUtc")
                if air_date_str:
                    try:
                        # Parse the UTC date string
                        air_date = datetime.datetime.fromisoformat(air_date_str.replace('Z', '+00:00')).date()
                        if air_date > current_date:
                            logger.info(f"Skipping future episode '{series_title}' - S{season_num}E{ep_num} - '{ep_title}' (airs on {air_date})")
                            continue
                    except (ValueError, TypeError):
                        # If date parsing fails, proceed with the episode
                        pass

            # Check for restart signal before processing this episode
            if restart_cycle_flag():
                logger.info(f"🔄 Received restart signal before processing episode {ep_title}. Aborting...")
                break

            logger.info(f"Processing upgrade for \"{series_title}\" - S{season_num}E{ep_num} - \"{ep_title}\" (Episode ID: {episode_id})")

            # If MONITORED_ONLY, ensure both series & episode are monitored
            if MONITORED_ONLY:
                ep_monitored = ep_obj.get("monitored", False)
                # Check if series info is already included
                if "series" in ep_obj and isinstance(ep_obj["series"], dict):
                    series_monitored = ep_obj["series"].get("monitored", False)
                else:
                    # Retrieve the series record
                    series_data = arr_request(f"series/{series_id}", "GET")
                    series_monitored = series_data.get("monitored", False) if series_data else False
                if not ep_monitored or not series_monitored:
                    logger.info("Skipping unmonitored episode or series.")
                    continue

            # Check for restart signal before refreshing
            if restart_cycle_flag():
                logger.info(f"🔄 Received restart signal before refreshing series for {ep_title}. Aborting...")
                break

            # Refresh the series only if SKIP_SERIES_REFRESH is not enabled
            if not SKIP_SERIES_REFRESH:
                logger.info(" - Refreshing series information...")
                refresh_res = refresh_series(series_id)
                if not refresh_res:
                    logger.warning("WARNING: Refresh command failed. Skipping this episode.")
                    continue
                logger.info("Refresh command completed successfully.")
            else:
                logger.info(" - Skipping series refresh (SKIP_SERIES_REFRESH=true)")

            # Check for restart signal before searching
            if restart_cycle_flag():
                logger.info(f"🔄 Received restart signal before searching for {ep_title}. Aborting...")
                break

            # Search for the episode (upgrade)
            logger.info(" - Searching for quality upgrade...")
            search_res = episode_search_episodes([episode_id])
            if search_res:
                logger.info("Search command completed successfully.")
                # Mark processed
                save_processed_id(PROCESSED_UPGRADE_FILE, episode_id)
                episodes_processed += 1
                processing_done = True

                # Log with the current limit, not the initial one
                current_limit = get_current_upgrade_limit()
                logger.info(f"Processed {episodes_processed}/{current_limit} upgrade episodes this cycle.")
            else:
                logger.warning(f"WARNING: Search command failed for episode ID {episode_id}.")
                continue

            # Check for restart signal after processing an episode
            if restart_cycle_flag():
                logger.info(f"🔄 Received restart signal after processing episode {ep_title}. Aborting...")
                break

        # Move to the next page if using sequential mode.
        # In random mode, we just handle one random page this iteration,
        # then check whether we've processed enough episodes or should continue to another random page.
        if not should_use_random:
            page += 1

        # Check for restart signal after processing a page
        if restart_cycle_flag():
            logger.info(f"🔄 Received restart signal after processing page {page}. Aborting...")
            break

    # Log with the current limit, not the initial one
    current_limit = get_current_upgrade_limit()
    logger.info(f"Completed processing {episodes_processed} upgrade episodes for this cycle.")
    truncate_processed_list(PROCESSED_UPGRADE_FILE)
    return processing_done
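
A quick illustration of the airDateUtc parse used above: datetime.fromisoformat does not accept a trailing 'Z' on older Python versions, hence the replace to '+00:00' before comparing against today's date.

import datetime

air_date_str = "2025-04-20T00:00:00Z"  # hypothetical airDateUtc value
air_date = datetime.datetime.fromisoformat(air_date_str.replace('Z', '+00:00')).date()
print(air_date > datetime.datetime.now().date())  # True until the air date passes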


@@ -117,19 +117,79 @@ HUNT_MODE = determine_hunt_mode()
 # Ensure RANDOM_UPGRADES is dynamically reloaded at the start of each cycle
 # Updated logic to reload settings before processing upgrades
-def refresh_settings():
-    """Refresh configuration settings from the settings manager."""
-    global RANDOM_UPGRADES
-    # Force reload the settings_manager module to get fresh values from disk
-    from primary import settings_manager
-    importlib.reload(settings_manager)
-    # Reload RANDOM_UPGRADES from settings
+def refresh_settings(app_type: str = None) -> None:
+    """
+    Reload all settings from the settings file.
+
+    Args:
+        app_type: Optional app type to refresh settings for (sonarr, radarr, etc.)
+    """
+    global API_URL, API_KEY, SLEEP_DURATION, MONITORED_ONLY, HUNT_MISSING_SHOWS, HUNT_UPGRADE_EPISODES
+    global STATE_RESET_INTERVAL_HOURS, API_TIMEOUT, COMMAND_WAIT_DELAY, COMMAND_WAIT_ATTEMPTS
+    global MINIMUM_DOWNLOAD_QUEUE_SIZE, SKIP_FUTURE_EPISODES, SKIP_SERIES_REFRESH
+    global RANDOM_MISSING, RANDOM_UPGRADES, DEBUG_MODE
+
+    # If app_type is provided, temporarily override APP_TYPE
+    original_app_type = APP_TYPE
+    if app_type:
+        os.environ["APP_TYPE"] = app_type
+
+    # Settings that apply to all apps
+    SLEEP_DURATION = settings_manager.get_setting("huntarr", "sleep_duration", 900)
+    STATE_RESET_INTERVAL_HOURS = settings_manager.get_setting("huntarr", "state_reset_interval_hours", 168)
+    RANDOM_MISSING = settings_manager.get_setting("advanced", "random_missing", True)
     RANDOM_UPGRADES = settings_manager.get_setting("advanced", "random_upgrades", True)
-    # Log the refreshed settings
-    logger.debug(f"Settings refreshed: RANDOM_UPGRADES={RANDOM_UPGRADES}")
+    MONITORED_ONLY = settings_manager.get_setting("huntarr", "monitored_only", True)
+    API_TIMEOUT = settings_manager.get_setting("advanced", "api_timeout", 60)
+    COMMAND_WAIT_DELAY = settings_manager.get_setting("advanced", "command_wait_delay", 1)
+    COMMAND_WAIT_ATTEMPTS = settings_manager.get_setting("advanced", "command_wait_attempts", 600)
+    MINIMUM_DOWNLOAD_QUEUE_SIZE = settings_manager.get_setting("advanced", "minimum_download_queue_size", -1)
+    DEBUG_MODE = settings_manager.get_setting("advanced", "debug_mode", False)
+    LOG_REFRESH_INTERVAL_SECONDS = settings_manager.get_setting("huntarr", "log_refresh_interval_seconds", 30)
+
+    # Get the actual app type we're refreshing settings for
+    current_app_type = os.environ.get("APP_TYPE", original_app_type)
+
+    # App-specific settings based on APP_TYPE
+    if current_app_type == "sonarr":
+        HUNT_MISSING_SHOWS = settings_manager.get_setting("huntarr", "hunt_missing_shows", 1)
+        HUNT_UPGRADE_EPISODES = settings_manager.get_setting("huntarr", "hunt_upgrade_episodes", 0)
+        SKIP_FUTURE_EPISODES = settings_manager.get_setting("huntarr", "skip_future_episodes", True)
+        SKIP_SERIES_REFRESH = settings_manager.get_setting("huntarr", "skip_series_refresh", False)
+    elif current_app_type == "radarr":
+        HUNT_MISSING_MOVIES = settings_manager.get_setting("huntarr", "hunt_missing_movies", 1)
+        HUNT_UPGRADE_MOVIES = settings_manager.get_setting("huntarr", "hunt_upgrade_movies", 0)
+        SKIP_FUTURE_RELEASES = settings_manager.get_setting("huntarr", "skip_future_releases", True)
+        SKIP_MOVIE_REFRESH = settings_manager.get_setting("huntarr", "skip_movie_refresh", False)
+    elif current_app_type == "lidarr":
+        HUNT_MISSING_ALBUMS = settings_manager.get_setting("huntarr", "hunt_missing_albums", 1)
+        HUNT_UPGRADE_TRACKS = settings_manager.get_setting("huntarr", "hunt_upgrade_tracks", 0)
+        SKIP_FUTURE_RELEASES = settings_manager.get_setting("huntarr", "skip_future_releases", True)
+        SKIP_ARTIST_REFRESH = settings_manager.get_setting("huntarr", "skip_artist_refresh", False)
+    elif current_app_type == "readarr":
+        HUNT_MISSING_BOOKS = settings_manager.get_setting("huntarr", "hunt_missing_books", 1)
+        HUNT_UPGRADE_BOOKS = settings_manager.get_setting("huntarr", "hunt_upgrade_books", 0)
+        SKIP_FUTURE_RELEASES = settings_manager.get_setting("huntarr", "skip_future_releases", True)
+        SKIP_AUTHOR_REFRESH = settings_manager.get_setting("huntarr", "skip_author_refresh", False)
+
+    # For API credentials, use the credentials for the specific app
+    if app_type:
+        api_url, api_key = keys_manager.get_api_keys(app_type)
+        # Only set these variables temporarily if app_type is provided
+        os.environ[f"{app_type.upper()}_API_URL"] = api_url
+        os.environ[f"{app_type.upper()}_API_KEY"] = api_key
+    else:
+        # For backwards compatibility, still set the global variables
+        api_url, api_key = keys_manager.get_api_keys(APP_TYPE)
+        API_URL = api_url
+        API_KEY = api_key
+
+    # Restore the original APP_TYPE if we temporarily changed it
+    if app_type and app_type != original_app_type:
+        os.environ["APP_TYPE"] = original_app_type

 def log_configuration(logger):
     """Log the current configuration settings"""


@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 """
 Huntarr - Main entry point for the application
-Supports multiple Arr applications
+Supports multiple Arr applications running concurrently
 """

 import time
@@ -11,186 +11,396 @@ import socket
 import signal
 import importlib
 import logging
+import threading
+from typing import Dict, List, Optional

 # Set up logging first to avoid circular imports
 from primary.utils.logger import setup_logger
 logger = setup_logger()

 # Now import the rest of the modules
-from primary.config import HUNT_MODE, SLEEP_DURATION, MINIMUM_DOWNLOAD_QUEUE_SIZE, APP_TYPE, log_configuration, refresh_settings
+from primary.config import SLEEP_DURATION, MINIMUM_DOWNLOAD_QUEUE_SIZE, log_configuration, refresh_settings
 from primary.state import check_state_reset, calculate_reset_time
 from primary.api import get_download_queue_size
-from primary.utils.app_utils import get_ip_address  # Use centralized function
+from primary.utils.app_utils import get_ip_address
+from primary import keys_manager

-# Flag to indicate if cycle should restart
-restart_cycle = False
+# Flags to indicate if cycles should restart for each app
+restart_cycles = {
+    "sonarr": False,
+    "radarr": False,
+    "lidarr": False,
+    "readarr": False
+}
+
+# Track active threads
+app_threads: Dict[str, threading.Thread] = {}
+stop_threads = False

 def signal_handler(signum, frame):
     """Handle signals from the web UI for cycle restart"""
-    global restart_cycle
     if signum == signal.SIGUSR1:
-        logger.warning("⚠️ Received restart signal from web UI. Immediately aborting current operations... ⚠️")
-        restart_cycle = True
+        # Extract the app type from the signal data if available
+        app_type = os.environ.get("RESTART_APP_TYPE", "sonarr")
+        logger.info(f"🔄 Received restart signal for {app_type}")
+        restart_cycles[app_type] = True

 # Register signal handler for SIGUSR1
 signal.signal(signal.SIGUSR1, signal_handler)

+# Removed duplicate get_ip_address(); now using get_ip_address() from app_utils
+
 def force_reload_all_modules():
     """Force reload of all relevant modules to ensure fresh settings"""
     try:
-        # Force reload the settings_manager first
-        from primary import settings_manager
-        importlib.reload(settings_manager)
-        # Then reload config which depends on settings_manager
-        from primary import config
-        importlib.reload(config)
-        # Reload app-specific modules
-        if APP_TYPE == "sonarr":
-            from primary import missing
-            importlib.reload(missing)
-            from primary import upgrade
-            importlib.reload(upgrade)
-        # TODO: Add other app type module reloading when implemented
-        # Call the refresh function to ensure settings are updated
-        config.refresh_settings()
-        # Log the reloaded settings for verification
-        logger.info("Settings reloaded from huntarr.json file")
-        config.log_configuration(logger)
-        return True
-    except Exception as e:
+        importlib.reload(sys.modules['primary.config'])
+        logger.debug("Reloaded primary.config module")
+    except (KeyError, ImportError) as e:
         logger.error(f"Error reloading modules: {e}")
         return False

-def main_loop() -> None:
-    """Main processing loop for Huntarr"""
-    global restart_cycle
-    logger.info(f"=== Huntarr [{APP_TYPE.title()} Edition] Starting ===")
+def app_specific_loop(app_type: str) -> None:
+    """
+    Main processing loop for a specific Arr application
+
+    Args:
+        app_type: The type of Arr application (sonarr, radarr, lidarr, readarr)
+    """
+    global restart_cycles
+
+    # Get app-specific logger
+    from primary.utils.logger import get_logger
+    app_logger = get_logger(app_type)
+
+    app_logger.info(f"=== Huntarr [{app_type.title()} Edition] Starting ===")
     server_ip = get_ip_address()
-    logger.info(f"Web interface available at http://{server_ip}:9705")
+    app_logger.info(f"Web interface available at http://{server_ip}:9705")

-    while True:
-        restart_cycle = False
+    # Import necessary modules based on app type
+    if app_type == "sonarr":
+        from primary.apps.sonarr.missing import process_missing_episodes
+        from primary.apps.sonarr.upgrade import process_cutoff_upgrades
+        from primary.api import get_download_queue_size as sonarr_get_download_queue_size
+    elif app_type == "radarr":
+        from primary.apps.radarr.missing import process_missing_movies
+        from primary.apps.radarr.upgrade import process_cutoff_upgrades
+        # Placeholder for Radarr-specific API functions
+        sonarr_get_download_queue_size = lambda: 0  # Placeholder
+    elif app_type == "lidarr":
+        from primary.apps.lidarr.missing import process_missing_albums
+        from primary.apps.lidarr.upgrade import process_cutoff_upgrades
+        # Placeholder for Lidarr-specific API functions
+        sonarr_get_download_queue_size = lambda: 0  # Placeholder
+    elif app_type == "readarr":
+        from primary.apps.readarr.missing import process_missing_books
+        from primary.apps.readarr.upgrade import process_cutoff_upgrades
+        # Placeholder for Readarr-specific API functions
+        sonarr_get_download_queue_size = lambda: 0  # Placeholder
+
+    # Get API keys for this app
+    api_url, api_key = keys_manager.get_api_keys(app_type)
+
+    # Set the API credentials for this thread context
+    os.environ[f"{app_type.upper()}_API_URL"] = api_url
+    os.environ[f"{app_type.upper()}_API_KEY"] = api_key
+
+    while not stop_threads:
+        restart_cycles[app_type] = False

         # Always reload settings from huntarr.json at the start of each cycle
-        refresh_settings()
-        check_state_reset()
-        logger.info(f"=== Starting Huntarr cycle ===")
-        from primary.api import check_connection
+        refresh_settings(app_type)
+        check_state_reset(app_type)
+        app_logger.info(f"=== Starting Huntarr {app_type} cycle ===")
+
+        # Import check_connection with the correct app type
+        import_module = __import__('primary.api', fromlist=[''])
+        check_connection = getattr(import_module, 'check_connection')
+
+        # Override the global APP_TYPE for this thread
+        os.environ["APP_TYPE"] = app_type

         api_connected = False
+        connection_attempts = 0
-        while not api_connected and not restart_cycle:
-            refresh_settings()  # Ensure latest settings are loaded
-            api_connected = check_connection()
+        while not api_connected and not restart_cycles[app_type] and not stop_threads:
+            refresh_settings(app_type)  # Ensure latest settings are loaded
+            api_connected = check_connection(app_type)
             if not api_connected:
-                logger.error(f"Cannot connect to {APP_TYPE.title()}. Please check your API URL and API key.")
-                logger.info(f"Will retry in 10 seconds...")
+                app_logger.error(f"Cannot connect to {app_type.title()}. Please check your API URL and API key.")
+                app_logger.info("Will retry in 10 seconds...")
                 for _ in range(10):
                     time.sleep(1)
-                    if restart_cycle:
+                    if restart_cycles[app_type] or stop_threads:
                         break
+                connection_attempts += 1
+                if connection_attempts >= 3:
+                    app_logger.warning(f"Multiple failed connection attempts to {app_type.title()}. Will try again next cycle.")
+                    break

-        if restart_cycle:
-            logger.warning("⚠️ Restarting cycle due to settings change... ⚠️")
+        if restart_cycles[app_type]:
+            app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
             continue
         if not api_connected:
-            logger.error("Connection failed, skipping this cycle.")
+            app_logger.error(f"Connection to {app_type} failed, skipping this cycle.")
             time.sleep(10)
             continue

         processing_done = False
-        download_queue_size = get_download_queue_size()
-        if MINIMUM_DOWNLOAD_QUEUE_SIZE < 0 or (MINIMUM_DOWNLOAD_QUEUE_SIZE >= 0 and download_queue_size <= MINIMUM_DOWNLOAD_QUEUE_SIZE):
-            if restart_cycle:
-                logger.warning("⚠️ Restarting cycle due to settings change... ⚠️")
-                continue
-            if APP_TYPE == "sonarr":
+
+        # App-specific processing logic
+        if app_type == "sonarr":
+            # Get download queue size with the app-specific function
+            download_queue_size = sonarr_get_download_queue_size()
+            min_download_queue_size = MINIMUM_DOWNLOAD_QUEUE_SIZE
+            if min_download_queue_size < 0 or (min_download_queue_size >= 0 and download_queue_size <= min_download_queue_size):
+                if restart_cycles[app_type]:
+                    app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                    continue
+
+                # Get app-specific settings
+                from primary.config import HUNT_MISSING_SHOWS, HUNT_UPGRADE_EPISODES
+
                 if HUNT_MISSING_SHOWS > 0:
-                    logger.info(f"Configured to look for {HUNT_MISSING_SHOWS} missing shows")
-                    from primary.missing import process_missing_episodes
-                    if process_missing_episodes():
+                    app_logger.info(f"Configured to look for {HUNT_MISSING_SHOWS} missing shows")
+                    if process_missing_episodes(lambda: restart_cycles[app_type]):
                         processing_done = True
                     else:
-                        logger.info("No missing episodes processed - check if you have any missing episodes in Sonarr")
-                    if restart_cycle:
-                        logger.warning("⚠️ Restarting cycle due to settings change... ⚠️")
+                        app_logger.info("No missing episodes processed - check if you have any missing episodes in Sonarr")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
                         continue
                 else:
-                    logger.info("Missing shows search disabled (HUNT_MISSING_SHOWS=0)")
+                    app_logger.info("Missing shows search disabled (HUNT_MISSING_SHOWS=0)")

                 if HUNT_UPGRADE_EPISODES > 0:
-                    logger.info(f"Configured to look for {HUNT_UPGRADE_EPISODES} quality upgrades")
-                    from primary.upgrade import process_cutoff_upgrades
-                    if process_cutoff_upgrades():
+                    app_logger.info(f"Configured to look for {HUNT_UPGRADE_EPISODES} quality upgrades")
+                    if process_cutoff_upgrades(lambda: restart_cycles[app_type]):
                         processing_done = True
                     else:
-                        logger.info("No quality upgrades processed - check if you have any cutoff unmet episodes in Sonarr")
-                    if restart_cycle:
-                        logger.warning("⚠️ Restarting cycle due to settings change... ⚠️")
+                        app_logger.info("No quality upgrades processed - check if you have any cutoff unmet episodes in Sonarr")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
                         continue
                 else:
-                    logger.info("Quality upgrades search disabled (HUNT_UPGRADE_EPISODES=0)")
-        else:
-            logger.info(f"Download queue size ({download_queue_size}) is above the minimum threshold ({MINIMUM_DOWNLOAD_QUEUE_SIZE}). Skipped processing.")
-        calculate_reset_time()
-        refresh_settings()
+                    app_logger.info("Quality upgrades search disabled (HUNT_UPGRADE_EPISODES=0)")
+            else:
+                app_logger.info(f"Download queue size ({download_queue_size}) is above the minimum threshold ({min_download_queue_size}). Skipped processing.")
+        elif app_type == "radarr":
+            # Get download queue size with the app-specific function
+            download_queue_size = sonarr_get_download_queue_size()  # Placeholder - will be replaced with a radarr-specific function
+            min_download_queue_size = MINIMUM_DOWNLOAD_QUEUE_SIZE
+            if min_download_queue_size < 0 or (min_download_queue_size >= 0 and download_queue_size <= min_download_queue_size):
+                if restart_cycles[app_type]:
+                    app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                    continue
+
+                # Get app-specific settings
+                from primary.config import HUNT_MISSING_MOVIES, HUNT_UPGRADE_MOVIES
+
+                if HUNT_MISSING_MOVIES > 0:
+                    app_logger.info(f"Configured to look for {HUNT_MISSING_MOVIES} missing movies")
+                    if process_missing_movies(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No missing movies processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Missing movies search disabled (HUNT_MISSING_MOVIES=0)")
+
+                if HUNT_UPGRADE_MOVIES > 0:
+                    app_logger.info(f"Configured to look for {HUNT_UPGRADE_MOVIES} quality upgrades")
+                    if process_cutoff_upgrades(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No quality upgrades processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Quality upgrades search disabled (HUNT_UPGRADE_MOVIES=0)")
+            else:
+                app_logger.info(f"Download queue size ({download_queue_size}) is above the minimum threshold ({min_download_queue_size}). Skipped processing.")
+        elif app_type == "lidarr":
+            # Get download queue size with the app-specific function
+            download_queue_size = sonarr_get_download_queue_size()  # Placeholder - will be replaced with a lidarr-specific function
+            min_download_queue_size = MINIMUM_DOWNLOAD_QUEUE_SIZE
+            if min_download_queue_size < 0 or (min_download_queue_size >= 0 and download_queue_size <= min_download_queue_size):
+                if restart_cycles[app_type]:
+                    app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                    continue
+
+                # Get app-specific settings
+                from primary.config import HUNT_MISSING_ALBUMS, HUNT_UPGRADE_TRACKS
+
+                if HUNT_MISSING_ALBUMS > 0:
+                    app_logger.info(f"Configured to look for {HUNT_MISSING_ALBUMS} missing albums")
+                    if process_missing_albums(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No missing albums processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Missing albums search disabled (HUNT_MISSING_ALBUMS=0)")
+
+                if HUNT_UPGRADE_TRACKS > 0:
+                    app_logger.info(f"Configured to look for {HUNT_UPGRADE_TRACKS} quality upgrades")
+                    if process_cutoff_upgrades(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No quality upgrades processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Quality upgrades search disabled (HUNT_UPGRADE_TRACKS=0)")
+            else:
+                app_logger.info(f"Download queue size ({download_queue_size}) is above the minimum threshold ({min_download_queue_size}). Skipped processing.")
+        elif app_type == "readarr":
+            # Get download queue size with the app-specific function
+            download_queue_size = sonarr_get_download_queue_size()  # Placeholder - will be replaced with a readarr-specific function
+            min_download_queue_size = MINIMUM_DOWNLOAD_QUEUE_SIZE
+            if min_download_queue_size < 0 or (min_download_queue_size >= 0 and download_queue_size <= min_download_queue_size):
+                if restart_cycles[app_type]:
+                    app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                    continue
+
+                # Get app-specific settings
+                from primary.config import HUNT_MISSING_BOOKS, HUNT_UPGRADE_BOOKS
+
+                if HUNT_MISSING_BOOKS > 0:
+                    app_logger.info(f"Configured to look for {HUNT_MISSING_BOOKS} missing books")
+                    if process_missing_books(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No missing books processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Missing books search disabled (HUNT_MISSING_BOOKS=0)")
+
+                if HUNT_UPGRADE_BOOKS > 0:
+                    app_logger.info(f"Configured to look for {HUNT_UPGRADE_BOOKS} quality upgrades")
+                    if process_cutoff_upgrades(lambda: restart_cycles[app_type]):
+                        processing_done = True
+                    else:
+                        app_logger.info("No quality upgrades processed - feature not yet fully implemented")
+                    if restart_cycles[app_type]:
+                        app_logger.warning(f"⚠️ Restarting {app_type} cycle due to settings change... ⚠️")
+                        continue
+                else:
+                    app_logger.info("Quality upgrades search disabled (HUNT_UPGRADE_BOOKS=0)")
+            else:
+                app_logger.info(f"Download queue size ({download_queue_size}) is above the minimum threshold ({min_download_queue_size}). Skipped processing.")
+
+        calculate_reset_time(app_type)
+        refresh_settings(app_type)

         from primary.config import SLEEP_DURATION as CURRENT_SLEEP_DURATION
-        logger.info(f"Cycle complete. Sleeping {CURRENT_SLEEP_DURATION}s before next cycle...")
+        app_logger.info(f"{app_type} cycle complete. Sleeping {CURRENT_SLEEP_DURATION}s before next cycle...")
         server_ip = get_ip_address()
-        logger.info(f"Web interface available at http://{server_ip}:9705")
+        app_logger.info(f"Web interface available at http://{server_ip}:9705")

         sleep_start = time.time()
         sleep_end = sleep_start + CURRENT_SLEEP_DURATION
-        while time.time() < sleep_end and not restart_cycle:
+        while time.time() < sleep_end and not restart_cycles[app_type] and not stop_threads:
             time.sleep(min(1, sleep_end - time.time()))
             if int((time.time() - sleep_start) % 60) == 0 and time.time() < sleep_end - 10:
                 remaining = int(sleep_end - time.time())
-                logger.debug(f"Sleeping... {remaining}s remaining until next cycle")
-        if restart_cycle:
-            logger.warning("⚠️ Sleep interrupted due to settings change. Restarting cycle immediately... ⚠️")
+                app_logger.debug(f"{app_type} sleeping... {remaining}s remaining until next cycle")
+            if restart_cycles[app_type]:
+                app_logger.warning(f"⚠️ {app_type} sleep interrupted due to settings change. Restarting cycle immediately... ⚠️")
+                break

-if __name__ == "__main__":
+def start_app_threads():
+    """Start threads for all configured apps"""
+    # Check which apps are configured
+    configured_apps = keys_manager.get_configured_apps()
+    for app_type, is_configured in configured_apps.items():
+        if is_configured and app_type not in app_threads:
+            logger.info(f"Starting thread for {app_type}")
+            thread = threading.Thread(target=app_specific_loop, args=(app_type,), daemon=True)
+            app_threads[app_type] = thread
+            thread.start()
+
+def check_and_restart_threads():
+    """Check if any threads have died and restart them"""
+    for app_type, thread in list(app_threads.items()):
+        if not thread.is_alive():
+            logger.warning(f"{app_type} thread died, restarting...")
+            del app_threads[app_type]
+            new_thread = threading.Thread(target=app_specific_loop, args=(app_type,), daemon=True)
+            app_threads[app_type] = new_thread
+            new_thread.start()
+
+def shutdown_threads():
+    """Signal all threads to stop and wait for them to finish"""
+    global stop_threads
+    stop_threads = True
+    logger.info("Shutting down all threads...")
+    # Wait for all threads to finish
+    for app_type, thread in app_threads.items():
+        logger.info(f"Waiting for {app_type} thread to finish...")
+        thread.join(timeout=10)
+    logger.info("All threads stopped")
+
+def main():
+    """Main entry point for Huntarr"""
     # Log configuration settings
     log_configuration(logger)
     try:
-        main_loop()
+        # Start threads for all configured apps
+        start_app_threads()
+
+        # Main loop to monitor threads
+        while True:
+            # Check if any configured apps need threads started
+            start_app_threads()
+            # Check if any threads have died and restart them
+            check_and_restart_threads()
+            # Sleep for a bit
+            time.sleep(5)
     except KeyboardInterrupt:
         logger.info("Huntarr stopped by user.")
+        shutdown_threads()
         sys.exit(0)
     except Exception as e:
         logger.exception(f"Unexpected error: {e}")
-        sys.exit(1)
+        shutdown_threads()
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
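
A hedged sketch of how the web UI side might trigger the per-app restart path above — this assumes the UI runs in the same process, as the RESTART_APP_TYPE environment-variable handshake implies, with os.getpid() standing in for however the PID is actually obtained:

import os
import signal

os.environ["RESTART_APP_TYPE"] = "radarr"  # read by signal_handler above
os.kill(os.getpid(), signal.SIGUSR1)       # interrupts only radarr's cycle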


@@ -1,102 +1,275 @@
#!/usr/bin/env python3
"""
State management for Huntarr
Handles tracking which items have been processed
State management module for Huntarr
Handles all persistence of program state
"""
import os
import datetime
import time
import pathlib
from typing import List
import json
from typing import List, Dict, Any, Optional
from primary.utils.logger import logger
from primary.config import STATE_RESET_INTERVAL_HOURS, APP_TYPE
from primary import settings_manager
# State directory setup
STATE_DIR = pathlib.Path("/config/stateful")
STATE_DIR.mkdir(parents=True, exist_ok=True)
# Create state directories based on app type
def get_state_file_path(app_type: str, state_type: str) -> str:
"""
Get the path to a state file based on app type and state type.
Args:
app_type: The type of app (sonarr, radarr, etc.)
state_type: The type of state file (e.g., processed_missing, processed_upgrades)
Returns:
The absolute path to the state file
"""
if app_type == "sonarr":
base_path = "/tmp/huntarr-state/sonarr"
elif app_type == "radarr":
base_path = "/tmp/huntarr-state/radarr"
elif app_type == "lidarr":
base_path = "/tmp/huntarr-state/lidarr"
elif app_type == "readarr":
base_path = "/tmp/huntarr-state/readarr"
else:
base_path = "/tmp/huntarr-state/unknown"
# Ensure the directory exists
os.makedirs(base_path, exist_ok=True)
return f"{base_path}/{state_type}.json"
# Define state file paths based on the get_state_file_path function
PROCESSED_MISSING_FILE = get_state_file_path(APP_TYPE, "processed_missing")
PROCESSED_UPGRADES_FILE = get_state_file_path(APP_TYPE, "processed_upgrades")
LAST_RESET_FILE = get_state_file_path(APP_TYPE, "last_reset")
def get_last_reset_time(app_type: str = None) -> datetime.datetime:
"""
Get the last time the state was reset for a specific app type.
Args:
app_type: The type of app to get last reset time for. If None, uses APP_TYPE.
Returns:
The datetime of the last reset, or a very old date if no reset has occurred.
"""
current_app_type = app_type or APP_TYPE
reset_file = get_state_file_path(current_app_type, "last_reset")
try:
if os.path.exists(reset_file):
with open(reset_file, "r") as f:
reset_time_str = f.read().strip()
return datetime.datetime.fromisoformat(reset_time_str)
except Exception as e:
logger.error(f"Error reading processed IDs from {file_path}: {e}")
logger.error(f"Error reading last reset time for {current_app_type}: {e}")
# Default to a very old date if no reset has occurred
return datetime.datetime.fromtimestamp(0)
def set_last_reset_time(reset_time: datetime.datetime, app_type: str = None) -> None:
"""
Set the last time the state was reset for a specific app type.
Args:
reset_time: The datetime to set
app_type: The type of app to set last reset time for. If None, uses APP_TYPE.
"""
current_app_type = app_type or APP_TYPE
reset_file = get_state_file_path(current_app_type, "last_reset")
try:
# Ensure the directory exists
os.makedirs(os.path.dirname(reset_file), exist_ok=True)
with open(reset_file, "w") as f:
f.write(reset_time.isoformat())
except Exception as e:
logger.error(f"Error writing last reset time for {current_app_type}: {e}")
def check_state_reset(app_type: str = None) -> bool:
"""
Check if the state needs to be reset based on the reset interval.
If it's time to reset, clears the processed IDs and updates the last reset time.
Args:
app_type: The type of app to check state reset for. If None, uses APP_TYPE.
Returns:
True if the state was reset, False otherwise.
"""
current_app_type = app_type or APP_TYPE
# Get reset interval from settings
reset_interval = settings_manager.get_setting("huntarr", "state_reset_interval_hours", 168)
last_reset = get_last_reset_time(current_app_type)
now = datetime.datetime.now()
# Calculate the time delta since the last reset
delta = now - last_reset
hours_passed = delta.total_seconds() / 3600
# Reset if it's been longer than the reset interval
if hours_passed >= reset_interval:
logger.info(f"State files for {current_app_type} have not been reset in {hours_passed:.1f} hours. Resetting now.")
# Clear processed IDs
clear_processed_ids(current_app_type)
# Update the last reset time
set_last_reset_time(now, current_app_type)
return True
return False
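A sketch of how a hunt cycle might use this, assuming it runs once at the top of each app loop (the surrounding loop is elided):

# At the start of each cycle: reset stale state if the interval has elapsed.
if check_state_reset("radarr"):
    logger.info("Radarr state reset; previously processed movies are eligible again.")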
def clear_processed_ids(app_type: str = None) -> None:
"""
Clear all processed IDs for a specific app type.
Args:
app_type: The type of app to clear processed IDs for. If None, uses APP_TYPE.
"""
current_app_type = app_type or APP_TYPE
# Clear missing IDs
missing_file = get_state_file_path(current_app_type, "processed_missing")
try:
if os.path.exists(missing_file):
with open(missing_file, "w") as f:
f.write("[]")
logger.info(f"Cleared processed missing IDs for {current_app_type}")
except Exception as e:
logger.error(f"Error clearing processed missing IDs for {current_app_type}: {e}")
# Clear upgrade IDs
upgrades_file = get_state_file_path(current_app_type, "processed_upgrades")
try:
if os.path.exists(upgrades_file):
with open(upgrades_file, "w") as f:
f.write("[]")
logger.info(f"Cleared processed upgrade IDs for {current_app_type}")
except Exception as e:
logger.error(f"Error clearing processed upgrade IDs for {current_app_type}: {e}")
def calculate_reset_time(app_type: str = None) -> str:
"""
Calculate when the next state reset will occur.
Args:
app_type: The type of app to calculate reset time for. If None, uses APP_TYPE.
Returns:
A string representation of when the next reset will occur.
"""
current_app_type = app_type or APP_TYPE
# Get reset interval from settings
reset_interval = settings_manager.get_setting("huntarr", "state_reset_interval_hours", 168)
last_reset = get_last_reset_time(current_app_type)
next_reset = last_reset + datetime.timedelta(hours=reset_interval)
now = datetime.datetime.now()
# If the next reset is in the past, it will reset in the next cycle
if next_reset < now:
return "Next reset: at the start of the next cycle"
# Calculate time until next reset
delta = next_reset - now
hours = delta.total_seconds() / 3600
if hours < 1:
minutes = delta.total_seconds() / 60
return f"Next reset: in {int(minutes)} minutes"
elif hours < 24:
return f"Next reset: in {int(hours)} hours"
else:
days = hours / 24
return f"Next reset: in {int(days)} days"
def load_processed_ids(filepath: str) -> List[int]:
"""
Load processed IDs from a file.
Args:
filepath: The path to the file
Returns:
A list of processed IDs
"""
try:
if os.path.exists(filepath):
with open(filepath, "r") as f:
return json.load(f)
return []
except Exception as e:
logger.error(f"Error loading processed IDs from {filepath}: {e}")
return []
def save_processed_ids(filepath: str, ids: List[int]) -> None:
"""
Save processed IDs to a file.
Args:
filepath: The path to the file
ids: The list of IDs to save
"""
try:
# Ensure the directory exists
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
json.dump(ids, f)
except Exception as e:
logger.error(f"Error writing to {file_path}: {e}")
logger.error(f"Error saving processed IDs to {filepath}: {e}")
def save_processed_id(filepath: str, item_id: int) -> None:
"""
Add a single ID to a processed IDs file.
Args:
filepath: The path to the file
item_id: The ID to add
"""
processed_ids = load_processed_ids(filepath)
if item_id not in processed_ids:
processed_ids.append(item_id)
save_processed_ids(filepath, processed_ids)
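Taken together, a typical read-modify-write round trip looks like this (the ID is illustrative; truncate_processed_list is defined just below):

missing_file = get_state_file_path("lidarr", "processed_missing")
save_processed_id(missing_file, 4711)              # mark one album as handled
assert 4711 in load_processed_ids(missing_file)    # survives across cycles
truncate_processed_list(missing_file)              # cap the list at 1000 entries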
def truncate_processed_list(filepath: str, max_items: int = 1000) -> None:
"""
Truncate a processed IDs list to a maximum number of items.
This helps prevent the file from growing too large over time.
Args:
filepath: The path to the file
max_items: The maximum number of items to keep
"""
processed_ids = load_processed_ids(filepath)
if len(processed_ids) > max_items:
# Keep only the most recent items (at the end of the list)
processed_ids = processed_ids[-max_items:]
save_processed_ids(filepath, processed_ids)
logger.debug(f"Truncated {filepath} to {max_items} items")
# Initialize state files for all app types
def init_state_files() -> None:
"""Initialize state files for all app types"""
app_types = ["sonarr", "radarr", "lidarr", "readarr"]
for app_type in app_types:
missing_file = get_state_file_path(app_type, "processed_missing")
upgrades_file = get_state_file_path(app_type, "processed_upgrades")
# Initialize the files if they don't exist
for filepath in [missing_file, upgrades_file]:
if not os.path.exists(filepath):
save_processed_ids(filepath, [])
# Run initialization
init_state_files()
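Note that this commit moves state from newline-delimited .txt files under /config/stateful to JSON arrays under /tmp/huntarr-state, so existing state is not carried over. A hypothetical one-off migration, assuming the old files are still present; this is not part of the commit:

import pathlib

def migrate_old_state(app_type: str) -> None:
    # Old format: one numeric ID per line, in the pre-commit /config/stateful layout.
    old_file = pathlib.Path(f"/config/stateful/processed_missing_{app_type}.txt")
    if old_file.exists():
        ids = [int(line) for line in old_file.read_text().splitlines() if line.strip().isdigit()]
        save_processed_ids(get_state_file_path(app_type, "processed_missing"), ids)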

View File

@@ -1,31 +1,45 @@
#!/usr/bin/env python3
"""
Logging configuration for Huntarr
Supports separate log files for each application type
"""
import logging
import sys
import os
import pathlib
from typing import Dict, Optional
# Create log directory
LOG_DIR = pathlib.Path("/tmp/huntarr-logs")
LOG_DIR.mkdir(parents=True, exist_ok=True)
# Default log file for general messages
MAIN_LOG_FILE = LOG_DIR / "huntarr.log"
# App-specific log files
APP_LOG_FILES = {
"sonarr": LOG_DIR / "huntarr-sonarr.log",
"radarr": LOG_DIR / "huntarr-radarr.log",
"lidarr": LOG_DIR / "huntarr-lidarr.log",
"readarr": LOG_DIR / "huntarr-readarr.log"
}
# Global logger instances
logger = None
app_loggers: Dict[str, logging.Logger] = {}
def setup_logger(debug_mode=None, app_type=None):
"""Configure and return the application logger
Args:
debug_mode (bool, optional): Override the DEBUG_MODE from config. Defaults to None.
app_type (str, optional): The app type to set up a logger for. Defaults to None (main logger).
Returns:
logging.Logger: The configured logger
"""
global logger, app_loggers
# Get DEBUG_MODE from config, but only if we haven't been given a value
# Use a safe approach to avoid circular imports
@@ -41,58 +55,116 @@ def setup_logger(debug_mode=None):
else:
use_debug_mode = debug_mode
# Determine the logger and log file to use
if app_type in APP_LOG_FILES:
# Use or create an app-specific logger
log_name = f"huntarr.{app_type}"
log_file = APP_LOG_FILES[app_type]
if log_name in app_loggers:
# Reset existing logger
current_logger = app_loggers[log_name]
for handler in current_logger.handlers[:]:
current_logger.removeHandler(handler)
else:
# Create a new logger
current_logger = logging.getLogger(log_name)
app_loggers[log_name] = current_logger
else:
# Use or create the main logger
log_name = "huntarr"
log_file = MAIN_LOG_FILE
if logger is None:
# First-time setup
current_logger = logging.getLogger(log_name)
logger = current_logger
else:
# Reset handlers to avoid duplicates
current_logger = logger
for handler in current_logger.handlers[:]:
current_logger.removeHandler(handler)
# Set the log level based on use_debug_mode
current_logger.setLevel(logging.DEBUG if use_debug_mode else logging.INFO)
# Create console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG if use_debug_mode else logging.INFO)
# Create file handler for the web interface
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG if use_debug_mode else logging.INFO)
# Set format - include app_type in format if provided
log_format = "%(asctime)s - "
if app_type:
log_format += f"{app_type} - "
else:
log_format += "huntarr - "
log_format += "%(levelname)s - %(message)s"
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s",
log_format,
datefmt="%Y-%m-%d %H:%M:%S",
)
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# Add handlers to logger
current_logger.addHandler(console_handler)
current_logger.addHandler(file_handler)
if use_debug_mode:
logger.debug("Debug logging enabled")
current_logger.debug("Debug logging enabled")
return current_logger
# Create the main logger instance on module import
logger = setup_logger()
def get_logger(app_type: str) -> logging.Logger:
"""
Get a logger for a specific app type.
Args:
app_type: The app type to get a logger for.
Returns:
A logger specific to the app type.
"""
if app_type in APP_LOG_FILES:
log_name = f"huntarr.{app_type}"
if log_name not in app_loggers:
# Set up the logger if it doesn't exist
return setup_logger(app_type=app_type)
return app_loggers[log_name]
else:
# Return the main logger if the app type is not recognized
return logger
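Typical usage from an app loop (the message is illustrative):

sonarr_logger = get_logger("sonarr")
sonarr_logger.info("Hunting 3 missing episodes")
# -> written to /tmp/huntarr-logs/huntarr-sonarr.log and streamed by /logs?app=sonarr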
def debug_log(message: str, data: object = None, app_type: Optional[str] = None) -> None:
"""
Log debug messages with optional data.
Args:
message: The message to log.
data: Optional data to include with the message.
app_type: Optional app type to log to a specific app's log file.
"""
current_logger = get_logger(app_type) if app_type else logger
if current_logger.level <= logging.DEBUG:
current_logger.debug(f"{message}")
if data is not None:
try:
import json
as_json = json.dumps(data)
if len(as_json) > 500:
as_json = as_json[:500] + "..."
current_logger.debug(as_json)
except:
data_str = str(data)
if len(data_str) > 500:
data_str = data_str[:500] + "..."
current_logger.debug(data_str)
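And the app-aware debug helper, assuming debug logging is enabled for Radarr (the payload is illustrative):

debug_log("Queue response", {"totalRecords": 3}, app_type="radarr")
# -> message and truncated JSON payload land in huntarr-radarr.log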

View File

@@ -18,7 +18,8 @@ import base64
import io
import requests
import logging
import threading
from flask import Flask, render_template, request, jsonify, Response, send_from_directory, redirect, url_for, session
from primary.config import API_URL
from primary import settings_manager, keys_manager
from primary.utils.logger import setup_logger
@@ -40,15 +41,7 @@ log.setLevel(logging.ERROR)
# Create Flask app
app = Flask(__name__, template_folder='../templates', static_folder='../static')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Log file location
LOG_FILE = "/tmp/huntarr-logs/huntarr.log"
LOG_DIR = pathlib.Path(LOG_FILE).parent
LOG_DIR.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger("huntarr-web")
logging.basicConfig(level=logging.DEBUG)
app.secret_key = os.environ.get('SECRET_KEY', 'dev_key_for_sessions')
# Register blueprints
app.register_blueprint(common_bp, url_prefix="/")
@@ -57,34 +50,188 @@ app.register_blueprint(radarr_bp, url_prefix="/radarr")
app.register_blueprint(lidarr_bp, url_prefix="/lidarr")
app.register_blueprint(readarr_bp, url_prefix="/readarr")
# Authentication middleware remains here for app-wide protection if needed
# Global main process PID
MAIN_PID = os.getpid()
# Lock for accessing the log files
log_lock = threading.Lock()
# Root directory for log files
LOG_DIR = "/tmp/huntarr-logs"
# Default log refresh interval (seconds)
LOG_REFRESH_INTERVAL = settings_manager.get_setting("huntarr", "log_refresh_interval_seconds", 30)
# Function to get the PID of the main python process
def get_main_process_pid():
return MAIN_PID
# Function to trigger reload of settings for a specific app
def trigger_settings_reload(app_type):
"""
Trigger a settings reload for a specific app by sending a SIGUSR1 signal to the main process
with the app type set in an environment variable.
Args:
app_type: The app type to reload settings for (sonarr, radarr, etc.)
"""
# Set environment variable for the app type to restart
os.environ["RESTART_APP_TYPE"] = app_type
# Send SIGUSR1 to the main process
pid = get_main_process_pid()
if pid:
try:
os.kill(pid, signal.SIGUSR1)
return True
except Exception as e:
print(f"Error sending signal to process {pid}: {e}")
return False
@app.before_request
def before_request():
auth_result = authenticate_request()
if auth_result:
return auth_result
@app.route('/')
def home():
return render_template('index.html')
if __name__ == "__main__":
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
from primary.utils.app_utils import get_ip_address
ip_address = get_ip_address()
with open(LOG_FILE, 'a') as f:
f.write(f"{timestamp} - huntarr-web - INFO - Web server starting on port 9705\n")
f.write(f"{timestamp} - huntarr-web - INFO - Web interface available at http://{ip_address}:9705\n")
app.run(host='0.0.0.0', port=9705, debug=False, threaded=True)
@app.route('/user')
def user():
# User account screen
return render_template('user.html')
@app.route('/settings')
def settings():
# Redirect to the home page with settings tab active
return render_template('index.html')
@app.route('/logs')
def logs():
"""
Event stream for logs.
Filter logs by app type using the 'app' query parameter.
Example: /logs?app=sonarr
"""
app_type = request.args.get('app', 'sonarr') # Default to sonarr if no app specified
# Validate app_type
if app_type not in ['sonarr', 'radarr', 'lidarr', 'readarr']:
app_type = 'sonarr' # Default to sonarr for invalid app types
def generate():
# Get the specific log file for the app type
log_file_path = f"{LOG_DIR}/huntarr-{app_type}.log"
if not os.path.exists(log_file_path):
# Create the file if it doesn't exist
with open(log_file_path, 'a') as f:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
f.write(f"{timestamp} - {app_type} - INFO - Log file created\n")
# Initial position - start at the end of the file
with open(log_file_path, 'r') as f:
f.seek(0, os.SEEK_END)
pos = f.tell()
while True:
with log_lock:
with open(log_file_path, 'r') as f:
f.seek(pos)
lines = f.readlines()
pos = f.tell()
if lines:
for line in lines:
# Check if the line contains the app_type
if f" - {app_type} - " in line or " - huntarr - " in line:
yield f"data: {line}\n\n"
time.sleep(1) # Check for new logs every second
return Response(generate(), mimetype='text/event-stream')
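A minimal sketch of consuming this stream from Python with requests, assuming the default port from docker-compose and that authentication is already satisfied (session handling elided):

import requests

# Tail Radarr's log stream; each SSE payload is one log line.
with requests.get("http://localhost:9705/logs?app=radarr", stream=True) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        if raw and raw.startswith("data: "):
            print(raw[len("data: "):])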
@app.route('/api/settings', methods=['GET', 'POST'])
def api_settings():
if request.method == 'GET':
# Return all settings
return jsonify(settings_manager.get_all_settings())
elif request.method == 'POST':
# Save settings and trigger reload for the specific app type
data = request.json
app_type = data.get('app_type', 'sonarr')
result = settings_manager.save_settings(data)
# Trigger reload for the specific app type
if result.get('success', False):
reload_success = trigger_settings_reload(app_type)
if not reload_success:
result['message'] = "Settings saved but failed to trigger reload"
return jsonify(result)
@app.route('/api/settings/theme', methods=['GET', 'POST'])
def api_theme():
if request.method == 'GET':
# Return current theme setting
dark_mode = settings_manager.get_setting("ui", "dark_mode", False)
return jsonify({"dark_mode": dark_mode})
elif request.method == 'POST':
# Save theme setting
data = request.json
dark_mode = data.get('dark_mode', False)
settings_manager.set_setting("ui", "dark_mode", dark_mode)
return jsonify({"success": True})
@app.route('/api/settings/reset', methods=['POST'])
def api_reset_settings():
data = request.json
app_type = data.get('app', 'sonarr')
# Reset settings for the specific app type
settings_manager.reset_settings(app_type)
# Trigger reload for the specific app type
reload_success = trigger_settings_reload(app_type)
return jsonify({"success": True, "reload_triggered": reload_success})
@app.route('/api/app-settings', methods=['GET'])
def api_app_settings():
app_type = request.args.get('app', 'sonarr')
# Validate app_type
if app_type not in ['sonarr', 'radarr', 'lidarr', 'readarr']:
return jsonify({"success": False, "message": f"Invalid app type: {app_type}"})
# Get API credentials for the specified app type
api_url, api_key = keys_manager.get_api_keys(app_type)
return jsonify({
"success": True,
"api_url": api_url,
"api_key": api_key
})
@app.route('/api/configured-apps', methods=['GET'])
def api_configured_apps():
# Return the configured status of all apps
configured_apps = keys_manager.get_configured_apps()
return jsonify(configured_apps)
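For example, a quick check from Python; the response shape is assumed from keys_manager.get_configured_apps:

import requests

configured = requests.get("http://localhost:9705/api/configured-apps").json()
# e.g. {'sonarr': True, 'radarr': False, 'lidarr': False, 'readarr': True}
print([app for app, ok in configured.items() if ok])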
def start_web_server():
"""Start the web server in debug or production mode"""
debug_mode = os.environ.get('DEBUG', 'false').lower() == 'true'
host = '0.0.0.0' # Listen on all interfaces
port = int(os.environ.get('PORT', 9705))
# Ensure the log directory exists
os.makedirs(LOG_DIR, exist_ok=True)
# Serve with Werkzeug's built-in server (no separate WSGI server is used)
app.run(host=host, port=port, debug=debug_mode, use_reloader=False)
if __name__ == '__main__':
start_web_server()