Merge pull request #364 from plexguide/hunt0015

Hunt0015
This commit is contained in:
Admin9705
2025-05-07 20:31:24 -04:00
committed by GitHub
17 changed files with 2749 additions and 12 deletions
+123
View File
@@ -1733,3 +1733,126 @@ input:checked + .slider:before {
color: #222;
background: #fff;
}
/* Hunt Status Styles */

/* Card wrapper around the hunt status section */
.hunt-status-card {
    margin-bottom: 1rem;
}

.hunt-status-container {
    padding: 1rem;
}

/* Vertical stack of individual status items */
.hunt-status-list {
    display: flex;
    flex-direction: column;
    gap: 1rem;
}

.hunt-status-item {
    background: var(--card-bg);
    border: 1px solid var(--border-color);
    border-radius: 8px;
    padding: 1rem;
    transition: all 0.3s ease;
}

/* Lift the item slightly on hover */
.hunt-status-item:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}

.hunt-status-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 0.5rem;
}

.hunt-app-name {
    font-weight: 600;
    color: var(--text-primary);
}

.hunt-instance-name {
    color: var(--text-secondary);
    font-size: 0.9rem;
}

.hunt-status-content {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 0.5rem;
}

.hunt-media-name {
    font-weight: 500;
    color: var(--text-primary);
}

/* Pill-shaped badge; the modifier classes below are the lowercased
   hunt status values rendered by the hunt-status JS/template */
.hunt-status-badge {
    padding: 0.25rem 0.75rem;
    border-radius: 12px;
    font-size: 0.85rem;
    font-weight: 500;
}

.hunt-status-badge.requested {
    background-color: var(--warning-bg);
    color: var(--warning-text);
}

.hunt-status-badge.searching {
    background-color: var(--info-bg);
    color: var(--info-text);
}

.hunt-status-badge.found {
    background-color: var(--success-bg);
    color: var(--success-text);
}

.hunt-status-badge.nothing-found {
    background-color: var(--error-bg);
    color: var(--error-text);
}

.hunt-status-badge.time-exceeded {
    background-color: var(--error-bg);
    color: var(--error-text);
}

.hunt-status-badge.completed {
    background-color: var(--success-bg);
    color: var(--success-text);
}

.hunt-status-footer {
    display: flex;
    justify-content: space-between;
    align-items: center;
    font-size: 0.85rem;
    color: var(--text-secondary);
}

/* Placeholder shown when there are no statuses to display */
.no-hunt-status {
    text-align: center;
    padding: 2rem;
    color: var(--text-secondary);
}

/* Responsive adjustments */
@media (max-width: 768px) {
    .hunt-status-header,
    .hunt-status-content,
    .hunt-status-footer {
        flex-direction: column;
        gap: 0.5rem;
        align-items: flex-start;
    }

    .hunt-status-badge {
        align-self: flex-start;
    }
}
+210 -3
View File
@@ -235,6 +235,9 @@ const historyModule = {
this.elements.historyEmptyState.style.display = 'none';
this.elements.historyTable.style.display = 'table';
// Make sure we have the tooltip container
this.ensureTooltipContainer();
// Render rows
data.entries.forEach(entry => {
const row = document.createElement('tr');
@@ -243,16 +246,34 @@ const historyModule = {
const appType = entry.app_type ? entry.app_type.charAt(0).toUpperCase() + entry.app_type.slice(1) : '';
const formattedInstance = appType ? `${appType} - ${entry.instance_name}` : entry.instance_name;
// Store the full JSON data as a data attribute
row.dataset.fullJson = JSON.stringify(entry);
row.innerHTML = `
<td>${entry.date_time_readable}</td>
<td>${this.escapeHtml(entry.processed_info)}</td>
<td>
<div class="title-with-info">
${this.escapeHtml(entry.processed_info)}
<span class="info-badge" title="View full details">info</span>
</div>
</td>
<td>${this.formatHuntStatus(entry.hunt_status)}</td>
<td>${this.formatOperationType(entry.operation_type)}</td>
<td>${this.escapeHtml(entry.id)}</td>
<td>${this.escapeHtml(formattedInstance)}</td>
<td>${this.escapeHtml(entry.how_long_ago)}</td>
`;
tableBody.appendChild(row);
// Add hover events to the info badge
const infoBadge = row.querySelector('.info-badge');
if (infoBadge) {
infoBadge.addEventListener('mouseover', (e) => this.showJsonTooltip(e, row));
infoBadge.addEventListener('mouseout', () => this.hideJsonTooltip());
infoBadge.addEventListener('click', (e) => {
e.stopPropagation();
this.toggleJsonTooltip(e, row);
});
}
});
},
@@ -320,6 +341,192 @@ const historyModule = {
default:
return operationType ? this.escapeHtml(operationType.charAt(0).toUpperCase() + operationType.slice(1)) : 'Unknown';
}
},
// Helper function to format hunt status
formatHuntStatus: function(huntStatus) {
if (!huntStatus) return '<span class="status-badge status-unknown">Unknown</span>';
let badgeClass = 'status-unknown';
let displayText = huntStatus;
// Format based on status
switch(huntStatus.toLowerCase()) {
case 'searching':
badgeClass = 'status-searching';
break;
case 'downloaded':
badgeClass = 'status-downloaded';
break;
case 'downloading':
badgeClass = 'status-downloading';
break;
case 'error':
badgeClass = 'status-error';
break;
}
return `<span class="status-badge ${badgeClass}">${displayText}</span>`;
},
// Ensure tooltip container exists.
// Lazily creates the single floating #json-tooltip element used by the
// history table's "info" badges, plus a one-time <style> block with its
// appearance. Safe to call repeatedly; both creations are idempotent.
ensureTooltipContainer: function() {
    // Check if container already exists
    if (document.getElementById('json-tooltip')) return;

    // Create tooltip container (hidden until showJsonTooltip displays it)
    const tooltipContainer = document.createElement('div');
    tooltipContainer.id = 'json-tooltip';
    tooltipContainer.className = 'json-tooltip';
    tooltipContainer.style.display = 'none';
    document.body.appendChild(tooltipContainer);

    // Add styles for the tooltip (injected once per page load)
    if (!document.getElementById('tooltip-styles')) {
        const styleEl = document.createElement('style');
        styleEl.id = 'tooltip-styles';
        styleEl.textContent = `
            .json-tooltip {
                position: absolute;
                z-index: 1000;
                max-width: 600px;
                max-height: 400px;
                overflow: auto;
                background: rgba(30, 38, 55, 0.95);
                color: #fff;
                border-radius: 8px;
                box-shadow: 0 4px 15px rgba(0, 0, 0, 0.3);
                padding: 15px;
                font-family: monospace;
                white-space: pre-wrap;
                font-size: 12px;
                border: 1px solid rgba(90, 109, 137, 0.4);
                backdrop-filter: blur(5px);
            }
            .title-with-info {
                display: flex;
                align-items: center;
                gap: 8px;
            }
            .info-badge {
                display: inline-block;
                background-color: #4b6bff;
                color: white;
                border-radius: 4px;
                padding: 2px 6px;
                font-size: 11px;
                cursor: pointer;
                text-transform: uppercase;
                font-weight: bold;
                letter-spacing: 0.5px;
                opacity: 0.8;
                transition: all 0.2s ease;
            }
            .info-badge:hover {
                opacity: 1;
                transform: scale(1.05);
                background-color: #5f7dff;
            }
            .json-key {
                color: #9cdcfe;
            }
            .json-string {
                color: #ce9178;
            }
            .json-number {
                color: #b5cea8;
            }
            .json-boolean {
                color: #569cd6;
            }
            .json-null {
                color: #569cd6;
            }
        `;
        document.head.appendChild(styleEl);
    }
},
// Show JSON tooltip
showJsonTooltip: function(event, row) {
const tooltip = document.getElementById('json-tooltip');
if (!tooltip || !row.dataset.fullJson) return;
try {
// Parse JSON data
const jsonData = JSON.parse(row.dataset.fullJson);
// Format JSON with syntax highlighting
const formattedJson = this.formatJsonForDisplay(jsonData);
// Set tooltip content
tooltip.innerHTML = formattedJson;
// Position tooltip near cursor
const x = event.clientX + 15;
let y = event.clientY + 15;
// Check if tooltip would go off screen and adjust accordingly
const rightEdge = x + tooltip.offsetWidth;
const bottomEdge = y + tooltip.offsetHeight;
if (rightEdge > window.innerWidth) {
tooltip.style.left = (x - tooltip.offsetWidth) + 'px';
} else {
tooltip.style.left = x + 'px';
}
if (bottomEdge > window.innerHeight) {
tooltip.style.top = (y - tooltip.offsetHeight) + 'px';
} else {
tooltip.style.top = y + 'px';
}
// Show tooltip
tooltip.style.display = 'block';
} catch (error) {
console.error('Error parsing JSON for tooltip:', error);
}
},
// Hide JSON tooltip
hideJsonTooltip: function() {
const tooltip = document.getElementById('json-tooltip');
if (tooltip) {
tooltip.style.display = 'none';
}
},
// Toggle JSON tooltip on click
toggleJsonTooltip: function(event, row) {
const tooltip = document.getElementById('json-tooltip');
if (!tooltip || !row.dataset.fullJson) return;
// If tooltip is already visible, hide it
if (tooltip.style.display === 'block') {
this.hideJsonTooltip();
return;
}
// Otherwise show it (reuse the show method)
this.showJsonTooltip(event, row);
},
// Format JSON with syntax highlighting
formatJsonForDisplay: function(obj) {
// Convert to formatted string with 2-space indentation
const jsonString = JSON.stringify(obj, null, 2);
// Add syntax highlighting by replacing with HTML
return jsonString
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"([^"]+)":/g, '<span class="json-key">"$1"</span>:') // Keys
.replace(/: "([^"]+)"/g, ': <span class="json-string">"$1"</span>') // String values
.replace(/: ([0-9]+)/g, ': <span class="json-number">$1</span>') // Number values
.replace(/: (true|false)/g, ': <span class="json-boolean">$1</span>') // Boolean values
.replace(/: null/g, ': <span class="json-null">null</span>'); // Null values
}
};
+86
View File
@@ -0,0 +1,86 @@
// Hunt Status Management
// Polls /api/hunt/status every 30 seconds and renders the latest hunt
// statuses into the .hunt-status-container element (if present on the page).
class HuntStatusManager {
    constructor() {
        this.statusContainer = document.querySelector('.hunt-status-container');
        this.updateInterval = 30000; // Update every 30 seconds
        this.startUpdates();
    }

    // Fetch the latest statuses from the backend and re-render the list.
    async updateHuntStatuses() {
        try {
            const response = await fetch('/api/hunt/status');
            const data = await response.json();

            if (data.status === 'success') {
                this.renderStatuses(data.data);
            } else {
                console.error('Failed to fetch hunt statuses:', data.message);
            }
        } catch (error) {
            console.error('Error updating hunt statuses:', error);
        }
    }

    // Render the status list, or a placeholder when there is nothing to show.
    renderStatuses(statuses) {
        if (!this.statusContainer) return;

        if (!statuses || statuses.length === 0) {
            this.statusContainer.innerHTML = `
                <div class="no-hunt-status">
                    <p>No hunt statuses available</p>
                </div>
            `;
            return;
        }

        const statusList = document.createElement('div');
        statusList.className = 'hunt-status-list';

        statuses.forEach(status => {
            const statusItem = document.createElement('div');
            statusItem.className = 'hunt-status-item';

            // FIX: escape all server-provided text before interpolating it
            // into innerHTML; previously a crafted media/instance name could
            // inject markup (XSS).
            const statusText = status.status || '';
            statusItem.innerHTML = `
                <div class="hunt-status-header">
                    <span class="hunt-app-name">${this.escapeHtml(this.capitalizeFirst(status.app_name))}</span>
                    <span class="hunt-instance-name">${this.escapeHtml(status.instance_name)}</span>
                </div>
                <div class="hunt-status-content">
                    <div class="hunt-media-name">${this.escapeHtml(status.media_name)}</div>
                    <div class="hunt-status-badge ${this.escapeHtml(String(statusText).toLowerCase())}">${this.escapeHtml(statusText)}</div>
                </div>
                <div class="hunt-status-footer">
                    <span class="hunt-time">Requested: ${this.formatDateTime(status.time_requested)}</span>
                    <span class="hunt-id">ID: ${this.escapeHtml(status.id)}</span>
                </div>
            `;
            statusList.appendChild(statusItem);
        });

        this.statusContainer.innerHTML = '';
        this.statusContainer.appendChild(statusList);
    }

    // Escape untrusted text for safe embedding in innerHTML markup.
    escapeHtml(value) {
        if (value === null || value === undefined) return '';
        return String(value)
            .replace(/&/g, '&amp;')
            .replace(/</g, '&lt;')
            .replace(/>/g, '&gt;')
            .replace(/"/g, '&quot;');
    }

    // Uppercase the first character; tolerates null/empty input
    // (the original crashed on a missing app_name).
    capitalizeFirst(str) {
        if (!str) return '';
        return str.charAt(0).toUpperCase() + str.slice(1);
    }

    // Render a timestamp string in the viewer's locale.
    formatDateTime(dateTimeStr) {
        const date = new Date(dateTimeStr);
        return date.toLocaleString();
    }

    // Kick off the initial fetch and schedule periodic refreshes.
    startUpdates() {
        // Initial update
        this.updateHuntStatuses();

        // Set up periodic updates
        setInterval(() => this.updateHuntStatuses(), this.updateInterval);
    }
}
// Initialize hunt status manager when the DOM is loaded.
// The manager schedules its own periodic refresh in the constructor.
document.addEventListener('DOMContentLoaded', function () {
    new HuntStatusManager();
});
+14 -2
View File
@@ -812,12 +812,23 @@ let huntarrUI = {
const loggerParts = match[3].split('.');
if (loggerParts.length > 1) {
const possibleApp = loggerParts[1].toLowerCase();
if (['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr'].includes(possibleApp)) {
if ([
'sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr', 'hunting'
].includes(possibleApp)) {
logAppType = possibleApp;
}
// Special: If logger name contains 'hunting', treat as 'hunting'
else if (loggerParts[1].toLowerCase().includes('hunting')) {
logAppType = 'hunting';
}
}
}
// Special case for hunting logs
if (logAppType === 'system' && logString.includes('[HUNTING]')) {
logAppType = 'hunting';
}
// Special case for system logs that may contain app-specific patterns
if (logAppType === 'system') {
// App-specific patterns that may appear in system logs
@@ -828,7 +839,8 @@ let huntarrUI = {
'readarr': ['book', 'author', 'readarr'],
'whisparr': ['scene', 'adult', 'whisparr'],
'eros': ['eros', 'whisparr v3', 'whisparrv3'],
'swaparr': ['added strike', 'max strikes reached', 'would have removed', 'strikes, removing download', 'processing stalled downloads', 'swaparr']
'swaparr': ['added strike', 'max strikes reached', 'would have removed', 'strikes, removing download', 'processing stalled downloads', 'swaparr'],
'hunting': ['hunting manager', '[hunting]', 'hunt status', 'hunt request', 'huntarr.hunting', 'tracking', 'processed', 'movie id', 'movie file']
};
// Check each app's patterns
@@ -46,10 +46,9 @@
<table class="modern-table history-table">
<thead>
<tr>
<th>Date and Time</th>
<th>Processed Information</th>
<th>Name</th>
<th>Status</th>
<th>Operation</th>
<th>ID Number</th>
<th>Name of Instance</th>
<th>How Long Ago</th>
</tr>
@@ -376,6 +375,52 @@
border: 1px solid rgba(0, 123, 255, 0.3);
}
/* Status styling */
/* Badge-like styling for per-status text in the history table.
   Each class pairs a text color with a translucent background tint. */

/* Green: operation completed successfully */
.hunt-status-success {
    color: #2ecc71;
    font-weight: 500;
    padding: 3px 8px;
    background-color: rgba(46, 204, 113, 0.1);
    border-radius: 4px;
    font-size: 0.9em;
}

/* Blue: a search is in progress */
.hunt-status-searching {
    color: #3498db;
    font-weight: 500;
    padding: 3px 8px;
    background-color: rgba(52, 152, 219, 0.1);
    border-radius: 4px;
    font-size: 0.9em;
}

/* Orange: a release was found */
.hunt-status-found {
    color: #f39c12;
    font-weight: 500;
    padding: 3px 8px;
    background-color: rgba(243, 156, 18, 0.1);
    border-radius: 4px;
    font-size: 0.9em;
}

/* Green: the item has been downloaded */
.hunt-status-downloaded {
    color: #2ecc71;
    font-weight: 500;
    padding: 3px 8px;
    background-color: rgba(46, 204, 113, 0.1);
    border-radius: 4px;
    font-size: 0.9em;
}

/* Red: the hunt failed */
.hunt-status-failed {
    color: #e74c3c;
    font-weight: 500;
    padding: 3px 8px;
    background-color: rgba(231, 76, 60, 0.1);
    border-radius: 4px;
    font-size: 0.9em;
}
/* Empty state and loading styling */
.empty-state-message, .loading-indicator {
text-align: center;
@@ -464,7 +509,7 @@
originalRenderHistoryData.call(this, data);
// After the data is rendered, format the operation status columns
const operationCells = document.querySelectorAll('#historyTableBody tr td:nth-child(3)');
const operationCells = document.querySelectorAll('#historyTableBody tr td:nth-child(4)');
operationCells.forEach(cell => {
const operationText = cell.textContent.trim();
const statusClass = operationText.toLowerCase() === 'success' ? 'success' :
@@ -232,6 +232,39 @@
</div>
</div>
<!-- Hunt Status Section -->
<div class="card hunt-status-card">
<div class="card-header">
<h3>Latest Hunt Statuses</h3>
</div>
<div class="hunt-status-container">
{% if latest_hunt_statuses %}
<div class="hunt-status-list">
{% for status in latest_hunt_statuses %}
<div class="hunt-status-item">
<div class="hunt-status-header">
<span class="hunt-app-name">{{ status.app_name|title }}</span>
<span class="hunt-instance-name">{{ status.instance_name }}</span>
</div>
<div class="hunt-status-content">
<div class="hunt-media-name">{{ status.media_name }}</div>
<div class="hunt-status-badge {{ status.status|lower }}">{{ status.status }}</div>
</div>
<div class="hunt-status-footer">
<span class="hunt-time">Requested: {{ status.time_requested }}</span>
<span class="hunt-id">ID: {{ status.id }}</span>
</div>
</div>
{% endfor %}
</div>
{% else %}
<div class="no-hunt-status">
<p>No hunt statuses available</p>
</div>
{% endif %}
</div>
</div>
</div>
</section>
@@ -11,6 +11,7 @@
<option value="whisparr">Whisparr V2</option>
<option value="eros">Whisparr V3</option>
<option value="swaparr">Swaparr</option>
<option value="hunting">Hunting Manager</option>
<option value="system">System</option>
</select>
</div>
+2
View File
@@ -42,6 +42,8 @@
<script src="/static/js/direct-reset.js"></script>
<!-- Stats reset handler -->
<script src="/static/js/stats-reset.js"></script>
<!-- Hunt status handler -->
<script src="/static/js/hunt-status.js"></script>
</body>
</html>
+86
View File
@@ -200,6 +200,92 @@ def get_cutoff_unmet_movies(api_url: str, api_key: str, api_timeout: int, monito
radarr_logger.debug(f"Found {len(unmet_movies)} cutoff unmet movies (monitored_only={monitored_only}).")
return unmet_movies
def get_movie_by_id(api_url: str, api_key: str, movie_id: Union[str, int], api_timeout: int) -> Optional[Dict]:
    """
    Fetch detailed information for a single movie from Radarr.

    Args:
        api_url: The base URL of the Radarr API
        api_key: The API key for authentication
        movie_id: The ID of the movie to retrieve
        api_timeout: Timeout for the API request

    Returns:
        The movie object, or None if the request failed
    """
    radarr_logger.debug(f"Fetching movie details for ID: {movie_id}")
    try:
        movie = arr_request(api_url, api_key, api_timeout, f"movie/{movie_id}")
        if movie:
            radarr_logger.debug(f"Successfully retrieved details for movie ID {movie_id}: {movie.get('title', 'Unknown')}")
        else:
            radarr_logger.error(f"Failed to retrieve details for movie ID: {movie_id}")
        return movie
    except Exception as e:
        radarr_logger.error(f"Error fetching movie details for ID {movie_id}: {e}")
        return None
def get_movie_file(api_url: str, api_key: str, file_id: Union[str, int], api_timeout: int) -> Optional[Dict]:
    """
    Fetch detailed information for a movie file from Radarr.

    Args:
        api_url: The base URL of the Radarr API
        api_key: The API key for authentication
        file_id: The ID of the movie file to retrieve
        api_timeout: Timeout for the API request

    Returns:
        The movie file object, or None if the request failed
    """
    # A falsy ID cannot address a file; bail out early.
    if not file_id:
        radarr_logger.error("No file ID provided for get_movie_file")
        return None

    radarr_logger.debug(f"Fetching movie file details for ID: {file_id}")
    try:
        file_info = arr_request(api_url, api_key, api_timeout, f"moviefile/{file_id}")
        if file_info:
            radarr_logger.debug(f"Successfully retrieved movie file details for ID {file_id}")
        else:
            radarr_logger.error(f"Failed to retrieve movie file details for ID: {file_id}")
        return file_info
    except Exception as e:
        radarr_logger.error(f"Error fetching movie file details for ID {file_id}: {e}")
        return None
def get_download_queue(api_url: str, api_key: str, api_timeout: int) -> List[Dict]:
    """
    Get the current download queue.

    Args:
        api_url: The base URL of the Radarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request

    Returns:
        A list of queue items; an empty list if the request failed.
        (Note: unlike the other fetch helpers, this never returns None.)
    """
    endpoint = "queue?pageSize=100"  # Get a large number of items
    radarr_logger.debug("Fetching download queue")
    try:
        result = arr_request(api_url, api_key, api_timeout, endpoint)
        if result:
            # The queue endpoint returns a paged envelope; items live under 'records'.
            queue_items = result.get('records', [])
            radarr_logger.debug(f"Successfully retrieved download queue: {len(queue_items)} items")
            return queue_items
        else:
            radarr_logger.error("Failed to retrieve download queue")
            return []
    except Exception as e:
        radarr_logger.error(f"Error fetching download queue: {e}")
        return []
def refresh_movie(api_url: str, api_key: str, api_timeout: int, movie_id: int,
command_wait_delay: int = 1, command_wait_attempts: int = 600) -> Optional[int]:
"""
File diff suppressed because it is too large Load Diff
+72 -1
View File
@@ -81,13 +81,31 @@ def add_history_entry(app_type, entry_data):
# Create the entry with timestamp
timestamp = int(time.time())
entry = {
# Base required fields
"date_time": timestamp,
"date_time_readable": datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
"processed_info": entry_data["name"],
"id": entry_data["id"],
"instance_name": instance_name, # Use the instance_name we extracted above
"operation_type": entry_data.get("operation_type", "missing"), # Default to "missing" if not specified
"app_type": app_type # Include app_type in the entry for display in UI
"app_type": app_type, # Include app_type in the entry for display in UI
"hunt_status": entry_data.get("hunt_status", "Not Tracked"), # Add hunt status field
# Additional metadata fields with default values
"quality": entry_data.get("quality", None),
"size_mb": entry_data.get("size_mb", None),
"protocol": entry_data.get("protocol", None),
"indexer": entry_data.get("indexer", None),
"release_group": entry_data.get("release_group", None),
"year": entry_data.get("year", None),
"imdb_id": entry_data.get("imdb_id", None),
"tmdb_id": entry_data.get("tmdb_id", None),
"tvdb_id": entry_data.get("tvdb_id", None),
"genres": entry_data.get("genres", []),
"monitored": entry_data.get("monitored", None),
"last_check": entry_data.get("last_check", None),
"attempts": entry_data.get("attempts", 0),
"notes": entry_data.get("notes", None)
}
history_file = get_history_file_path(app_type, instance_name)
@@ -233,6 +251,59 @@ def format_time_ago(seconds):
else:
return f"{seconds} {'second' if seconds == 1 else 'seconds'} ago"
def update_history_entry_status(app_type, instance_name, item_id, hunt_status):
    """
    Update only the hunt status of an existing history entry, leaving the
    original timestamp and all other fields untouched.

    Parameters:
    - app_type: str - The app type (sonarr, radarr, etc)
    - instance_name: str - Name of the instance
    - item_id: str/int - ID of the item to update
    - hunt_status: str - New hunt status to set

    Returns:
    - bool - Success or failure
    """
    # Guard clauses: storage dir, known app type, and an existing file.
    if not ensure_history_dir():
        logger.error("Could not ensure history directory exists")
        return False

    if app_type not in history_locks:
        logger.error(f"Invalid app type: {app_type}")
        return False

    history_file = get_history_file_path(app_type, instance_name)
    if not history_file.exists():
        logger.warning(f"History file doesn't exist for {app_type}-{instance_name}")
        return False

    # Thread-safe file operation
    with history_locks[app_type]:
        try:
            with open(history_file, 'r') as f:
                entries = json.load(f)

            # Patch every entry whose ID matches (IDs compared as strings).
            target_id = str(item_id)
            matched = [entry for entry in entries if str(entry.get("id", "")) == target_id]
            for entry in matched:
                entry["hunt_status"] = hunt_status

            if not matched:
                logger.warning(f"No matching entry found for {app_type}-{instance_name} ID {item_id}")
                return False

            # Persist the modified list back to disk.
            with open(history_file, 'w') as f:
                json.dump(entries, f, indent=2)
            logger.info(f"Updated hunt status for {app_type}-{instance_name} ID {item_id} to '{hunt_status}'")
            return True
        except Exception as e:
            logger.error(f"Error updating hunt status: {e}")
            return False
def clear_history(app_type):
"""
Clear history for an app
+571
View File
@@ -0,0 +1,571 @@
"""
Unified Field Handler for Hunting Manager
This module defines the expected fields for each app type to be stored in history entries
and provides utilities for creating new entries, updating entries, and extracting data.
It eliminates any translation layer by directly handling all field operations.
"""
import logging
import time
from datetime import datetime
from typing import Dict, Any, List, Optional, Callable, Tuple
logger = logging.getLogger(__name__)
# This structure defines what data is needed for each app type
# - Required: Fields that must be present in any history entry
# - API endpoints: What API calls are needed to get the data
# - API fields: The specific fields to take from each API call (directly as-is)
# - Queue check: How to match queue items to this content type
# - Display: How to format the display name for the entry
APP_CONFIG = {
    # Movies (Radarr)
    "radarr": {
        "required_fields": [
            "id", "title", "year", "monitored", "hasFile"
        ],
        "api_calls": {
            "primary": "get_movie_by_id",  # Required, called first to get basic info
            "file": {  # Optional, called if primary indicates a file exists
                "endpoint": "get_movie_file",
                "condition": "hasFile",
                "condition_value": True,
                "id_source": "movieFile.id"
            },
            "queue": {  # Always called once per batch to check queue status
                "endpoint": "get_download_queue",
                "match_field": "movieId"  # How to match queue items to this content type
            }
        },
        "api_fields": {
            "get_movie_by_id": [
                "id", "title", "year", "hasFile", "monitored",
                "status", "path", "sortTitle", "overview", "images",
                "added", "movieFile", "imdbId", "tmdbId", "qualityProfileId"
            ],
            "get_movie_file": [
                "id", "size", "quality", "dateAdded", "mediaInfo",
                "originalFilePath", "relativePath", "resolution"
            ],
            "get_download_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft"
            ]
        },
        "display_info": {
            "name_format": "{title} ({year})",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    # TV series (Sonarr) — episode data is fetched per-episode, not per-file
    "sonarr": {
        "required_fields": [
            "id", "title", "monitored", "statistics"
        ],
        "api_calls": {
            "primary": "get_series_by_id",
            "episode": {  # Episode info is conditionally fetched for episode-based operations
                "endpoint": "get_episode",
                "multi_item": True,  # Multiple episodes per series
                "condition": None,  # Always fetch if episode IDs are available
                "id_source": "episodeIds"  # List of IDs to fetch
            },
            "queue": {
                "endpoint": "get_queue",
                "match_field": "seriesId"
            }
        },
        "api_fields": {
            "get_series_by_id": [
                "id", "title", "monitored", "statistics", "status",
                "path", "overview", "images", "added", "seasonCount",
                "seasons", "tvdbId", "imdbId", "tvMazeId", "qualityProfileId"
            ],
            "get_episode": [
                "id", "seriesId", "seasonNumber", "episodeNumber", "title",
                "airDate", "airDateUtc", "hasFile", "monitored", "absoluteEpisodeNumber"
            ],
            "get_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft", "title", "episode"
            ]
        },
        "display_info": {
            "name_format": "{title}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    # Music (Lidarr) — albums are fetched per-artist
    "lidarr": {
        "required_fields": [
            "id", "artistName", "monitored", "statistics"
        ],
        "api_calls": {
            "primary": "get_artist_by_id",
            "album": {  # Album info is conditionally fetched
                "endpoint": "get_album_by_artist_id",
                "multi_item": True,  # Multiple albums per artist
                "condition": None,  # Always fetch
                "id_source": "id"  # Artist ID to fetch albums for
            },
            "queue": {
                "endpoint": "get_queue",
                "match_field": "artistId"
            }
        },
        "api_fields": {
            "get_artist_by_id": [
                "id", "artistName", "monitored", "statistics", "status",
                "path", "overview", "images", "added", "albumCount",
                "qualityProfileId", "metadataProfileId", "foreignArtistId"
            ],
            "get_album_by_artist_id": [
                "id", "title", "releaseDate", "albumType", "duration",
                "monitored", "trackCount", "media", "ratings", "disambiguation"
            ],
            "get_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft", "title", "album"
            ]
        },
        "display_info": {
            "name_format": "{artistName}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    # Books (Readarr) — books are fetched per-author
    "readarr": {
        "required_fields": [
            "id", "authorName", "monitored", "statistics"
        ],
        "api_calls": {
            "primary": "get_author_by_id",
            "book": {  # Book info is conditionally fetched
                "endpoint": "get_books_by_author_id",
                "multi_item": True,  # Multiple books per author
                "condition": None,  # Always fetch
                "id_source": "id"  # Author ID to fetch books for
            },
            "queue": {
                "endpoint": "get_queue",
                "match_field": "authorId"
            }
        },
        "api_fields": {
            "get_author_by_id": [
                "id", "authorName", "monitored", "statistics", "status",
                "path", "overview", "images", "added", "qualityProfileId",
                "metadataProfileId", "foreignAuthorId"
            ],
            "get_books_by_author_id": [
                "id", "title", "releaseDate", "pageCount", "overview",
                "monitored", "ratings", "editions", "seriesTitle"
            ],
            "get_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft", "title", "book"
            ]
        },
        "display_info": {
            "name_format": "{authorName}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    # Legacy Whisparr (V2 API shape, movie-like items)
    "whisparr": {
        "required_fields": [
            "id", "title", "monitored", "hasFile"
        ],
        "api_calls": {
            "primary": "get_movie_by_id",
            "file": {
                "endpoint": "get_movie_file",
                "condition": "hasFile",
                "condition_value": True,
                "id_source": "movieFile.id"
            },
            "queue": {
                "endpoint": "get_download_queue",
                "match_field": "movieId"
            }
        },
        "api_fields": {
            "get_movie_by_id": [
                "id", "title", "hasFile", "monitored", "status",
                "path", "overview", "images", "added", "studio",
                "qualityProfileId", "imdbId"
                # Legacy Whisparr doesn't have these fields: genres, tags, collection
            ],
            "get_movie_file": [
                "id", "size", "quality", "dateAdded", "mediaInfo",
                "originalFilePath", "relativePath", "resolution"
            ],
            "get_download_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft"
            ]
        },
        "display_info": {
            "name_format": "{title}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    # Whisparr V3 (Eros branch) — superset of legacy Whisparr fields
    "eros": {
        "required_fields": [
            "id", "title", "monitored", "hasFile"
        ],
        "api_calls": {
            "primary": "get_movie_by_id",  # Future Whisparr V3 (Eros branch) API structure
            "file": {
                "endpoint": "get_movie_file",
                "condition": "hasFile",
                "condition_value": True,
                "id_source": "movieFile.id"
            },
            "queue": {
                "endpoint": "get_download_queue",
                "match_field": "movieId"
            }
        },
        "api_fields": {
            "get_movie_by_id": [
                "id", "title", "hasFile", "monitored", "status",
                "path", "overview", "images", "added", "studio",
                "qualityProfileId", "imdbId", "genres", "tags", "collection"
                # Eros has additional fields over legacy Whisparr: genres, tags, collection
            ],
            "get_movie_file": [
                "id", "size", "quality", "dateAdded", "mediaInfo",
                "originalFilePath", "relativePath", "resolution"
            ],
            "get_download_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft"
            ]
        },
        "display_info": {
            "name_format": "{title}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    },
    "whisparrv2": {  # This is now using the Eros API exclusively
        "required_fields": [
            "id", "title", "monitored", "hasFile"
        ],
        "api_calls": {
            "primary": "get_movie_by_id",
            "file": {
                "endpoint": "get_movie_file",
                "condition": "hasFile",
                "condition_value": True,
                "id_source": "movieFile.id"
            },
            "queue": {
                "endpoint": "get_download_queue",
                "match_field": "movieId"
            }
        },
        "api_fields": {
            "get_movie_by_id": [
                "id", "title", "hasFile", "monitored", "status",
                "path", "overview", "images", "added", "studio",
                "qualityProfileId", "imdbId", "genres", "tags", "collection"
                # Now includes all Eros fields since it's using the Eros API exclusively
            ],
            "get_movie_file": [
                "id", "size", "quality", "dateAdded", "mediaInfo",
                "originalFilePath", "relativePath", "resolution"
            ],
            "get_download_queue": [
                "status", "progress", "protocol", "downloadId",
                "estimatedCompletionTime", "statusMessages", "errorMessage",
                "size", "sizeleft", "timeleft"
                # Added the size-related fields to match Eros configuration
            ]
        },
        "display_info": {
            "name_format": "{title}",
            "id_field": "id",
            "default_operation": "monitored"
        }
    }
}
def get_nested_value(data: Dict[str, Any], field_path: str) -> Any:
    """
    Extract a value from a nested dictionary using dot notation.

    Supports list indexing ("a[0].b") and a special "field > N" form that
    returns the boolean result of comparing the resolved value against N.

    Examples:
        get_nested_value({"a": {"b": {"c": 1}}}, "a.b.c") -> 1
        get_nested_value({"a": [{"b": 1}, {"b": 2}]}, "a[0].b") -> 1

    Args:
        data: The dictionary to extract from
        field_path: The path to the field, using dot notation

    Returns:
        The extracted value, or None if not found
    """
    if not data or not field_path:
        return None

    # Handle special comparison operators for boolean conversion.
    # FIX: maxsplit=1 so a path containing " > " more than once no longer
    # raises an uncaught ValueError from tuple unpacking.
    if " > " in field_path:
        field_part, value_part = field_path.split(" > ", 1)
        try:
            actual_value = get_nested_value(data, field_part)
            threshold = int(value_part)
            return actual_value > threshold
        except (ValueError, TypeError):
            return None

    # Normal field navigation
    current = data
    for part in field_path.split("."):
        # Handle array indexing like "items[0]"
        if "[" in part and "]" in part:
            array_part = part.split("[")[0]
            index_part = part.split("[")[1].split("]")[0]

            # FIX: guard against non-dict containers; `part not in current`
            # raised TypeError when `current` was a scalar.
            if not isinstance(current, dict) or array_part not in current:
                return None
            array_data = current[array_part]

            # Get indexed item
            try:
                index = int(index_part)
                if not isinstance(array_data, list) or index >= len(array_data):
                    return None
                current = array_data[index]
            except (ValueError, IndexError):
                return None
        else:
            # Regular dictionary access; descending into a non-dict value
            # (scalar or list) means the path does not exist.
            if not isinstance(current, dict) or part not in current:
                return None
            current = current[part]

    return current
def fetch_api_data_for_item(app_type: str, item_id: str, api_handlers: Dict[str, Callable]) -> Tuple[Dict[str, Any], Dict[str, Any], List[Dict[str, Any]]]:
    """
    Fetch all necessary API data for an item using the app configuration.
    Centralizes API fetching based on the APP_CONFIG structure.
    Args:
        app_type: The type of app (radarr, sonarr, etc.)
        item_id: The ID of the item being tracked
        api_handlers: Dictionary of API handler functions (name -> function)
    Returns:
        Tuple of (primary_data, file_data, queue_items); (None, None, [])
        when configuration is missing or the primary fetch fails.
    """
    config = APP_CONFIG.get(app_type)
    if not config:
        logger.error(f"No configuration found for app type: {app_type}")
        return None, None, []
    call_specs = config.get("api_calls", {})
    # The primary endpoint is mandatory; bail out early if it is absent
    # or has no registered handler function.
    primary_endpoint = call_specs.get("primary")
    if not primary_endpoint or primary_endpoint not in api_handlers:
        logger.error(f"Missing primary API endpoint for {app_type}")
        return None, None, []
    # Fetch the item's basic data from the primary endpoint
    try:
        primary_data = api_handlers[primary_endpoint](item_id)
    except Exception as e:
        logger.error(f"Error fetching primary data for {app_type} item {item_id}: {str(e)}")
        return None, None, []
    if not primary_data:
        logger.warning(f"No data returned from {primary_endpoint} for ID {item_id}")
        return None, None, []
    # Optionally fetch file details when the configured condition holds
    file_data = None
    file_spec = call_specs.get("file")
    if file_spec:
        cond_field = file_spec.get("condition")
        cond_value = file_spec.get("condition_value")
        if cond_field in primary_data and primary_data[cond_field] == cond_value:
            file_endpoint = file_spec.get("endpoint")
            id_source = file_spec.get("id_source")
            if file_endpoint and id_source and file_endpoint in api_handlers:
                # Resolve the file ID from the primary payload (dot-path)
                file_id = get_nested_value(primary_data, id_source)
                if file_id:
                    try:
                        file_data = api_handlers[file_endpoint](file_id)
                    except Exception as e:
                        # Best-effort: missing file details are not fatal
                        logger.warning(f"Error fetching file data: {str(e)}")
    # Queue data is handled separately and passed in from the caller
    return primary_data, file_data, []
def create_history_entry(app_type: str, instance_name: str, item_id: str,
                         primary_data: Dict[str, Any],
                         file_data: Optional[Dict[str, Any]] = None,
                         queue_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]:
    """
    Create a new history entry with fields from the API responses.
    This creates the exact JSON structure needed for the history manager.
    Args:
        app_type: The type of app (radarr, sonarr, etc.)
        instance_name: The name of the app instance
        item_id: The ID of the item being tracked
        primary_data: Primary API data for the item
        file_data: Optional file data if available
        queue_data: Optional queue data if available
    Returns:
        Dict containing the complete history entry, or None when the app
        has no configuration or no primary data was supplied.
    """
    app_config = APP_CONFIG.get(app_type)
    if not app_config or not primary_data:
        logger.error(f"Cannot create history entry: missing config or data for {app_type}")
        return None
    # Create base entry with required metadata
    timestamp = int(time.time())
    entry = {
        # Standard history fields
        "date_time": timestamp,
        "date_time_readable": datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
        "id": str(item_id),
        "instance_name": instance_name,
        "app_type": app_type,
        "operation_type": app_config["display_info"].get("default_operation", "monitored"),
        "hunt_status": determine_hunt_status(app_type, primary_data, queue_data)
    }
    # Formatted name for display
    name_format = app_config["display_info"].get("name_format", "{id}")
    try:
        entry["processed_info"] = name_format.format(**primary_data)
    except (KeyError, ValueError):
        entry["processed_info"] = f"Item {item_id}"
    # Copy whitelisted fields from the primary API response
    api_fields = app_config.get("api_fields", {})
    primary_endpoint = app_config["api_calls"].get("primary")
    if primary_endpoint and primary_endpoint in api_fields:
        for field in api_fields[primary_endpoint]:
            if field in primary_data:
                entry[field] = primary_data[field]
    # Copy whitelisted file fields, prefixed with "file_"
    file_config = app_config["api_calls"].get("file")
    if file_config and file_data:
        file_endpoint = file_config.get("endpoint")
        if file_endpoint and file_endpoint in api_fields:
            for field in api_fields[file_endpoint]:
                if field in file_data:
                    entry[f"file_{field}"] = file_data[field]
    # Copy queue fields from the matching queue item, prefixed with "queue_"
    if queue_data:
        queue_config = app_config["api_calls"].get("queue")
        if queue_config:
            queue_endpoint = queue_config.get("endpoint")
            match_field = queue_config.get("match_field")
            # BUGFIX: int(item_id) previously ran inside the loop and raised
            # an uncaught ValueError for non-numeric IDs. Convert once,
            # safely; a non-numeric ID simply never matches a queue row.
            try:
                numeric_item_id = int(item_id)
            except (TypeError, ValueError):
                numeric_item_id = None
            # Find matching queue item
            for queue_item in queue_data:
                if numeric_item_id is not None and queue_item.get(match_field) == numeric_item_id:
                    if queue_endpoint and queue_endpoint in api_fields:
                        for field in api_fields[queue_endpoint]:
                            if field in queue_item:
                                entry[f"queue_{field}"] = queue_item[field]
                    entry["in_queue"] = True
                    break
            else:
                # for/else: no queue row matched this item
                entry["in_queue"] = False
    return entry
def determine_hunt_status(app_type: str, api_data: Dict[str, Any], queue_data: Optional[List[Dict[str, Any]]] = None) -> str:
    """
    Determine the hunt status based on API data and queue status.
    Args:
        app_type: The type of app (radarr, sonarr, etc.)
        api_data: Primary API data for the item
        queue_data: Optional queue data if available
    Returns:
        One of "Downloaded", "Found", "Searching", or "Not Tracked".
    """
    # No data at all means the item is not being tracked
    if not api_data:
        return "Not Tracked"
    item_id = api_data.get('id')
    # Each app family exposes "has a file" through a different field
    if app_type in ("radarr", "whisparr", "eros"):
        file_present = api_data.get('hasFile', False)
    elif app_type == "sonarr":
        file_present = api_data.get('statistics', {}).get('episodeFileCount', 0) > 0
    elif app_type == "lidarr":
        file_present = api_data.get('statistics', {}).get('trackFileCount', 0) > 0
    elif app_type == "readarr":
        file_present = api_data.get('statistics', {}).get('bookFileCount', 0) > 0
    else:
        file_present = False
    # A file on disk means the hunt already succeeded
    if file_present:
        return "Downloaded"
    # Look for the item in the download queue; each app keys its queue
    # rows on a different ID field.
    if queue_data:
        queue_id_field = {
            "radarr": "movieId",
            "sonarr": "seriesId",
            "lidarr": "artistId",
            "readarr": "authorId",
            "whisparr": "movieId",
            "eros": "movieId",
        }.get(app_type, "id")
        if any(row.get(queue_id_field) == item_id for row in queue_data):
            # Queued but not yet on disk
            return "Found"
    # Monitored but neither downloaded nor queued: still searching
    if api_data.get('monitored', False):
        return "Searching"
    # Default fallback
    return "Not Tracked"
+2 -1
View File
@@ -26,7 +26,8 @@ def log_processed_media(app_type, media_name, media_id, instance_name, operation
"name": media_name,
"id": str(media_id),
"instance_name": instance_name,
"operation_type": operation_type
"operation_type": operation_type,
"hunt_status": "Searching" # Set initial hunt status to Searching
}
result = add_history_entry(app_type, entry_data)
+99
View File
@@ -0,0 +1,99 @@
"""
Hunt Status Tracking Utility
This module provides functions to update hunt status in history entries across all *arr apps.
"""
import logging
from src.primary.utils.history_utils import add_history_entry
logger = logging.getLogger(__name__)
def update_hunt_status(app_type, instance_name, item_id, item_data, queue_status=None, operation_type="missing"):
    """
    Update history with the current hunt status of an item.
    Parameters:
    - app_type: str - The app type (sonarr, radarr, lidarr, readarr, etc.)
    - instance_name: str - Name of the instance
    - item_id: str/int - ID of the item being processed
    - item_data: dict - Item data from the API (movie, series, album, etc.)
    - queue_status: bool - Whether the item is in the download queue
    - operation_type: str - Operation type for history entry (default: "missing")
    Returns:
    - str - The hunt status that was set, or None on error
    """
    # Look up any existing history entry first so the original request
    # timestamp can be preserved instead of creating a duplicate record.
    from src.primary.history_manager import get_history
    history_data = get_history(app_type)
    prior_entry = None
    for candidate in history_data.get("entries", []):
        if str(candidate.get("id", "")) == str(item_id) and candidate.get("instance_name") == instance_name:
            prior_entry = candidate
            break
    try:
        # Resolve a display name and file-presence flag per app type.
        # NOTE(review): sonarr reads 'hasFile' here, while determine_hunt_status
        # uses statistics.episodeFileCount for sonarr — confirm which field the
        # payload passed to this function actually carries.
        if app_type == "radarr":
            display_name = item_data.get('title', f"Movie ID: {item_id}")
            file_present = item_data.get('hasFile', False)
        elif app_type == "sonarr":
            display_name = item_data.get('title', f"Series ID: {item_id}")
            file_present = item_data.get('hasFile', False)
        elif app_type == "lidarr":
            display_name = item_data.get('title', f"Artist ID: {item_id}")
            file_present = item_data.get('statistics', {}).get('trackFileCount', 0) > 0
        elif app_type == "readarr":
            display_name = item_data.get('title', f"Book ID: {item_id}")
            file_present = item_data.get('statistics', {}).get('bookFileCount', 0) > 0
        elif app_type in ("whisparr", "eros"):
            display_name = item_data.get('title', f"Scene ID: {item_id}")
            file_present = item_data.get('hasFile', False)
        else:
            display_name = item_data.get('title', f"Item ID: {item_id}")
            # Unknown app: try the common file-status markers
            file_present = (
                item_data.get('hasFile', False) or
                item_data.get('statistics', {}).get('fileCount', 0) > 0
            )
        # Derive the hunt status: file on disk wins, then queue presence
        if file_present:
            status = "Downloaded"
        elif queue_status:
            status = "Found"
        else:
            status = "Searching"
        if prior_entry:
            # Only refresh the status on the existing record so its
            # timestamp keeps reflecting the original request time.
            from src.primary.history_manager import update_history_entry_status
            update_history_entry_status(app_type, instance_name, item_id, status)
            logger.info(f"[HUNTING] Updated existing history entry status for {app_type} ID {item_id}: {status}")
        else:
            # No prior record: create a fresh history entry
            add_history_entry(app_type, {
                "name": display_name,
                "instance_name": instance_name,
                "id": item_id,
                "operation_type": operation_type,
                "hunt_status": status
            })
            logger.info(f"[HUNTING] Created new history entry with hunt status for {app_type} ID {item_id}: {status}")
        return status
    except Exception as e:
        logger.error(f"[HUNTING] Error updating hunt status for {app_type} ID {item_id}: {e}")
        return None
+115
View File
@@ -0,0 +1,115 @@
import json
import os
import time
import pathlib
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional
# Create logger
logger = logging.getLogger("hunting_manager")
class HuntingManager:
    """The modernized HuntingManager that acts as a facade to the history and stateful management systems.
    This class no longer maintains its own state files or tracking directories.
    Instead, it dynamically pulls data from /config/history via the history_manager
    and integrates with the stateful_manager to check which IDs have been processed.
    The field_mapper.py handles the actual data processing and structure.
    """
    def __init__(self, config_dir: str):
        """Initialize the HuntingManager with minimal configuration.
        Args:
            config_dir: Base configuration directory (mostly for compatibility)
        """
        self.config_dir = config_dir
        self.history_dir = os.path.join(config_dir, "history")
        logger.info(f"HuntingManager initialized using history data from {self.history_dir}")
    def track_movie(self, item_id: str, instance_name: str, movie_info: Dict):
        """Track an item via the history manager.
        This is a no-op method since tracking is handled through history_manager
        and processed_ids through stateful_manager.
        Args:
            item_id: ID of the item to track
            instance_name: Name of the app instance
            movie_info: Dictionary of item information
        """
        # Kept for API compatibility - all tracking is done via
        # history_manager (create_history_entry) and stateful_manager (add_processed_id)
        pass
    def update_item_status(self, app_name: str, instance_name: str, item_id: str,
                           new_status: str, debug_info: Optional[Dict] = None):
        """Update the status of a tracked item via history_manager.
        This is a no-op method since status updates are handled through
        the history_manager's update_history_entry_status method.
        Args:
            app_name: Type of app
            instance_name: Name of the app instance
            item_id: ID of the item
            new_status: New status value
            debug_info: Optional debug information
        """
        # Kept for API compatibility - status updates are handled via
        # history_manager (update_history_entry_status)
        pass
    def get_latest_statuses(self, limit: int = 5) -> List[Dict]:
        """Get the latest hunt statuses directly from history files.
        This now directly reads from the history directory to get the latest statuses.
        Args:
            limit: Maximum number of items to return
        Returns:
            List of dictionaries with status information
        """
        # Import in method to avoid circular imports
        from src.primary.history_manager import get_history
        # Get history entries for all app types
        app_types = ['radarr', 'sonarr', 'lidarr', 'readarr', 'whisparr', 'eros']
        all_history = []
        for app_type in app_types:
            try:
                # Get most recent entries for this app type
                history = get_history(app_type, page=1, page_size=limit)
                if history and 'items' in history:
                    all_history.extend(history['items'])
            except Exception as e:
                # BUGFIX: was a bare print(); route through the module logger
                # so the failure lands in the log files like everything else.
                logger.error(f"Error getting history for {app_type}: {e}")
        # Sort by timestamp, most recent first.
        # NOTE(review): entries built by create_history_entry use the keys
        # 'date_time' and 'id'; the 'timestamp'/'item_id' keys read below may
        # never be present - confirm history_manager's actual output schema.
        all_history.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
        # Format for the expected output format
        result = []
        for entry in all_history[:limit]:
            result.append({
                "app_type": entry.get("app_type", "unknown"),
                "instance": entry.get("instance_name", "unknown"),
                "id": entry.get("item_id", "unknown"),
                "name": entry.get("name", entry.get("title", "Unknown")),
                "status": entry.get("hunt_status", "Unknown"),
                "last_updated": entry.get("timestamp", ""),
                "requested_at": entry.get("timestamp", "")
            })
        return result
    def cleanup_old_records(self):
        """Cleanup is no longer needed as the history_manager handles record retention.
        This is maintained as a no-op method for compatibility.
        """
        # No longer needed - history retention is handled by history_manager
        pass
+107
View File
@@ -0,0 +1,107 @@
import requests
from typing import Dict, Optional
from datetime import datetime
from .hunting_manager import HuntingManager
class RadarrHuntingManager:
    """Radarr-specific status checks that report results through a HuntingManager."""
    def __init__(self, hunting_manager: 'HuntingManager'):
        # Quoted annotation avoids an import-time dependency on HuntingManager
        self.hunting_manager = hunting_manager
    def check_movie_status(self, instance_name: str, api_key: str, base_url: str,
                           movie_id: str, radarr_id: Optional[str] = None) -> Dict:
        """Check the status of a movie in Radarr.
        Args:
            instance_name: Name of the Radarr instance
            api_key: Radarr API key
            base_url: Base URL of the Radarr server
            movie_id: Internal tracking ID of the movie
            radarr_id: Optional Radarr database ID for a direct lookup
        Returns:
            Dict with "status" and "debug_info" keys.
        """
        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }
        # First check if the movie exists in Radarr
        if radarr_id:
            movie_url = f"{base_url}/api/v3/movie/{radarr_id}"
            try:
                # BUGFIX: requests.get without a timeout can hang forever
                response = requests.get(movie_url, headers=headers, timeout=30)
                if response.status_code == 200:
                    movie_data = response.json()
                    return self._process_movie_status(movie_data, instance_name, movie_id)
            except Exception as e:
                return {
                    "status": "Error",
                    "debug_info": {
                        "error": str(e),
                        "check_type": "movie_lookup",
                        "timestamp": datetime.now().isoformat()
                    }
                }
        # If no radarr_id or movie not found, check the queue
        queue_url = f"{base_url}/api/v3/queue"
        try:
            response = requests.get(queue_url, headers=headers, timeout=30)
            if response.status_code == 200:
                queue_data = response.json()
                return self._process_queue_status(queue_data, instance_name, movie_id)
        except Exception as e:
            return {
                "status": "Error",
                "debug_info": {
                    "error": str(e),
                    "check_type": "queue_lookup",
                    "timestamp": datetime.now().isoformat()
                }
            }
        return {
            "status": "Nothing Found",
            "debug_info": {
                "check_type": "no_results",
                "timestamp": datetime.now().isoformat()
            }
        }
    def _process_movie_status(self, movie_data: Dict, instance_name: str,
                              movie_id: str) -> Dict:
        """Process the movie status from Radarr API response."""
        status = "Requested"
        debug_info = {
            "radarr_data": movie_data,
            "check_type": "movie_lookup",
            "timestamp": datetime.now().isoformat()
        }
        # A file on disk beats a mere "monitored" flag
        if movie_data.get("hasFile", False):
            status = "Found"
        elif movie_data.get("monitored", False):
            status = "Searching"
        self.hunting_manager.update_item_status(
            "radarr", instance_name, movie_id, status, debug_info
        )
        return {
            "status": status,
            "debug_info": debug_info
        }
    def _process_queue_status(self, queue_data: Dict, instance_name: str,
                              movie_id: str) -> Dict:
        """Process the queue status from Radarr API response."""
        status = "Nothing Found"
        debug_info = {
            "radarr_data": queue_data,
            "check_type": "queue_lookup",
            "timestamp": datetime.now().isoformat()
        }
        for item in queue_data.get("records", []):
            # BUGFIX: the queue's movieId is an int while movie_id arrives as a
            # string, so the old == comparison never matched. Compare as strings.
            if str(item.get("movieId")) == str(movie_id):
                status = "Found"
                break
        self.hunting_manager.update_item_status(
            "radarr", instance_name, movie_id, status, debug_info
        )
        return {
            "status": status,
            "debug_info": debug_info
        }
+69 -1
View File
@@ -48,6 +48,9 @@ from src.primary.routes.history_routes import history_blueprint
# Import background module to trigger manual cycle resets
from src.primary import background
from src.primary.utils.hunting_manager import HuntingManager
from src.primary.utils.radarr_hunting_manager import RadarrHuntingManager
# Disable Flask default logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG) # Change to DEBUG to see all Flask/Werkzeug logs
@@ -145,15 +148,22 @@ KNOWN_LOG_FILES = {
"eros": APP_LOG_FILES.get("eros"), # Added Eros to known log files
"swaparr": APP_LOG_FILES.get("swaparr"), # Added Swaparr to known log files
"system": MAIN_LOG_FILE, # Map 'system' to the main huntarr log
"hunting": MAIN_LOG_FILE, # Map 'hunting' to the main huntarr log too
}
# Filter out None values if an app log file doesn't exist
KNOWN_LOG_FILES = {k: v for k, v in KNOWN_LOG_FILES.items() if v}
ALL_APP_LOG_FILES = list(KNOWN_LOG_FILES.values()) # List of all individual log file paths
# Initialize hunting managers
hunting_manager = HuntingManager("/config")
radarr_hunting_manager = RadarrHuntingManager(hunting_manager)
@app.route('/')
def home():
return render_template('index.html')
# Get latest hunt statuses
latest_statuses = hunting_manager.get_latest_statuses(limit=5)
return render_template('index.html', latest_hunt_statuses=latest_statuses)
@app.route('/user')
def user():
@@ -868,6 +878,64 @@ def reset_app_cycle(app_name):
'error': f"Failed to reset cycle for {app_name}. The app may not be running."
}), 500
@app.route('/api/hunt/status', methods=['GET'])
def api_hunt_status():
    """Get the latest hunt statuses."""
    try:
        # Pull the five most recent hunt records from the manager facade
        latest = hunting_manager.get_latest_statuses(limit=5)
        return jsonify({
            "status": "success",
            "data": latest
        })
    except Exception as e:
        # Surface any failure as a JSON error payload with HTTP 500
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500
@app.route('/api/hunt/settings', methods=['GET', 'POST'])
def api_hunt_settings():
    """Get or update hunt settings.

    GET  -> JSON payload with follow_up_time / max_time / min_time.
    POST -> expects JSON body with "follow_up_time"; updates the manager.

    NOTE(review): the modernized HuntingManager facade defines neither a
    time_config attribute nor an update_time_config method, so both branches
    below appear to raise AttributeError (returned as a 500) — confirm
    against the current HuntingManager implementation.
    """
    if request.method == 'GET':
        try:
            return jsonify({
                "status": "success",
                "data": {
                    # NOTE(review): time_config is assumed to be a dict with
                    # these three keys — verify the manager provides it.
                    "follow_up_time": hunting_manager.time_config["follow_up_time"],
                    "max_time": hunting_manager.time_config["max_time"],
                    "min_time": hunting_manager.time_config["min_time"]
                }
            })
        except Exception as e:
            # Any lookup failure is reported as a JSON error with HTTP 500
            return jsonify({
                "status": "error",
                "message": str(e)
            }), 500
    else:  # POST
        try:
            data = request.get_json()
            # follow_up_time is the only required field in the POST body
            if "follow_up_time" not in data:
                return jsonify({
                    "status": "error",
                    "message": "follow_up_time is required"
                }), 400
            hunting_manager.update_time_config(data["follow_up_time"])
            return jsonify({
                "status": "success",
                "message": "Settings updated successfully"
            })
        except ValueError as e:
            # Invalid value supplied by the client -> 400
            return jsonify({
                "status": "error",
                "message": str(e)
            }), 400
        except Exception as e:
            # Anything else is an internal failure -> 500
            return jsonify({
                "status": "error",
                "message": str(e)
            }), 500
# Start the web server in debug or production mode
def start_web_server():
"""Start the web server in debug or production mode"""