diff --git a/agents/patchmon-agent.sh b/agents/legacy-patchmon-agent.sh
similarity index 100%
rename from agents/patchmon-agent.sh
rename to agents/legacy-patchmon-agent.sh
diff --git a/agents/patchmon-agent-linux-386 b/agents/patchmon-agent-linux-386
new file mode 100755
index 0000000..7c808fc
Binary files /dev/null and b/agents/patchmon-agent-linux-386 differ
diff --git a/agents/patchmon-agent-linux-amd64 b/agents/patchmon-agent-linux-amd64
new file mode 100755
index 0000000..9ae3f78
Binary files /dev/null and b/agents/patchmon-agent-linux-amd64 differ
diff --git a/agents/patchmon-agent-linux-arm64 b/agents/patchmon-agent-linux-arm64
new file mode 100755
index 0000000..ab2a49e
Binary files /dev/null and b/agents/patchmon-agent-linux-arm64 differ
diff --git a/agents/patchmon_install.sh b/agents/patchmon_install.sh
index c6b3594..5bd6b3a 100644
--- a/agents/patchmon_install.sh
+++ b/agents/patchmon_install.sh
@@ -97,13 +97,22 @@ verify_datetime
 # Clean up old files (keep only last 3 of each type)
 cleanup_old_files() {
 # Clean up old credential backups
- ls -t /etc/patchmon/credentials.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+ ls -t /etc/patchmon/credentials.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+ # Clean up old config backups
+ ls -t /etc/patchmon/config.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
 
 # Clean up old agent backups
- ls -t /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+ ls -t /usr/local/bin/patchmon-agent.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
 
 # Clean up old log files
- ls -t /var/log/patchmon-agent.log.old.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+ ls -t /etc/patchmon/logs/patchmon-agent.log.old.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+ # Clean up old shell script backups (if any exist)
+ ls -t /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+ # Clean up old credentials backups (if any exist)
+ ls -t /etc/patchmon/credentials.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
 }
 
 # Run cleanup at start
@@ -127,6 +136,12 @@ if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
 error "Missing required parameters. This script should be called via the PatchMon web interface."
 fi
 
+# Parse architecture parameter (default to amd64)
+ARCHITECTURE="${ARCHITECTURE:-amd64}"
+if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" ]]; then
+ error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64"
+fi
+
 # Check if --force flag is set (for bypassing broken packages)
 FORCE_INSTALL="${FORCE_INSTALL:-false}"
 if [[ "$*" == *"--force"* ]] || [[ "$FORCE_INSTALL" == "true" ]]; then
@@ -142,6 +157,7 @@ info "🚀 Starting PatchMon Agent Installation..."
 info "📋 Server: $PATCHMON_URL"
 info "🔑 API ID: ${API_ID:0:16}..."
 info "🆔 Machine ID: ${MACHINE_ID:0:16}..."
+info "🏗️ Architecture: $ARCHITECTURE"
 
 # Display diagnostic information
 echo ""
@@ -150,6 +166,7 @@ echo " • URL: $PATCHMON_URL"
 echo " • CURL FLAGS: $CURL_FLAGS"
 echo " • API ID: ${API_ID:0:16}..."
 echo " • API Key: ${API_KEY:0:16}..."
+echo " • Architecture: $ARCHITECTURE"
 echo ""
 
 # Install required dependencies
@@ -294,67 +311,117 @@ else
 mkdir -p /etc/patchmon
 fi
 
-# Step 2: Create credentials file
-info "🔐 Creating API credentials file..."
+# Step 2: Create configuration files
+info "🔐 Creating configuration files..."
+
+# Check if config file already exists
+if [[ -f "/etc/patchmon/config.yml" ]]; then
+ warning "⚠️ Config file already exists at /etc/patchmon/config.yml"
+ warning "⚠️ Moving existing file out of the way for fresh installation"
+
+ # Clean up old config backups (keep only last 3)
+ ls -t /etc/patchmon/config.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+ # Move existing file out of the way
+ mv /etc/patchmon/config.yml /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)
+ info "📋 Moved existing config to: /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)"
+fi
 
 # Check if credentials file already exists
-if [[ -f "/etc/patchmon/credentials" ]]; then
- warning "⚠️ Credentials file already exists at /etc/patchmon/credentials"
+if [[ -f "/etc/patchmon/credentials.yml" ]]; then
+ warning "⚠️ Credentials file already exists at /etc/patchmon/credentials.yml"
 warning "⚠️ Moving existing file out of the way for fresh installation"
 
 # Clean up old credential backups (keep only last 3)
- ls -t /etc/patchmon/credentials.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+ ls -t /etc/patchmon/credentials.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
 
 # Move existing file out of the way
- mv /etc/patchmon/credentials /etc/patchmon/credentials.backup.$(date +%Y%m%d_%H%M%S)
- info "📋 Moved existing credentials to: /etc/patchmon/credentials.backup.$(date +%Y%m%d_%H%M%S)"
+ mv /etc/patchmon/credentials.yml /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)
+ info "📋 Moved existing credentials to: /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)"
 fi
 
-cat > /etc/patchmon/credentials << EOF
+# Clean up old credentials file if it exists (from previous installations)
+if [[ -f "/etc/patchmon/credentials" ]]; then
+ warning "⚠️ Found old credentials file, removing it..."
+ rm -f /etc/patchmon/credentials
+ info "📋 Removed old credentials file"
+fi
+
+# Create main config file
+cat > /etc/patchmon/config.yml << EOF
+# PatchMon Agent Configuration
+# Generated on $(date)
+patchmon_server: "$PATCHMON_URL"
+api_version: "v1"
+credentials_file: "/etc/patchmon/credentials.yml"
+log_file: "/etc/patchmon/logs/patchmon-agent.log"
+log_level: "info"
+EOF
+
+# Create credentials file
+cat > /etc/patchmon/credentials.yml << EOF
 # PatchMon API Credentials
 # Generated on $(date)
-PATCHMON_URL="$PATCHMON_URL"
-API_ID="$API_ID"
-API_KEY="$API_KEY"
+api_id: "$API_ID"
+api_key: "$API_KEY"
 EOF
 
-chmod 600 /etc/patchmon/credentials
-# Step 3: Download the agent script using API credentials
-info "📥 Downloading PatchMon agent script..."
+chmod 600 /etc/patchmon/config.yml
+chmod 600 /etc/patchmon/credentials.yml
 
-# Check if agent script already exists
-if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
- warning "⚠️ Agent script already exists at /usr/local/bin/patchmon-agent.sh"
+# Step 3: Download the PatchMon agent binary using API credentials
+info "📥 Downloading PatchMon agent binary..."
+
+# Determine the binary filename based on architecture
+BINARY_NAME="patchmon-agent-linux-${ARCHITECTURE}"
+
+# Check if agent binary already exists
+if [[ -f "/usr/local/bin/patchmon-agent" ]]; then
+ warning "⚠️ Agent binary already exists at /usr/local/bin/patchmon-agent"
 warning "⚠️ Moving existing file out of the way for fresh installation"
 
 # Clean up old agent backups (keep only last 3)
- ls -t /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+ ls -t /usr/local/bin/patchmon-agent.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
 
 # Move existing file out of the way
- mv /usr/local/bin/patchmon-agent.sh /usr/local/bin/patchmon-agent.sh.backup.$(date +%Y%m%d_%H%M%S)
- info "📋 Moved existing agent to: /usr/local/bin/patchmon-agent.sh.backup.$(date +%Y%m%d_%H%M%S)"
+ mv /usr/local/bin/patchmon-agent /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)
+ info "📋 Moved existing agent to: /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)"
 fi
 
+# Clean up old shell script if it exists (from previous installations)
+if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
+ warning "⚠️ Found old shell script agent, removing it..."
+ rm -f /usr/local/bin/patchmon-agent.sh
+ info "📋 Removed old shell script agent"
+fi
+
+# Download the binary
 curl $CURL_FLAGS \
 -H "X-API-ID: $API_ID" \
 -H "X-API-KEY: $API_KEY" \
- "$PATCHMON_URL/api/v1/hosts/agent/download" \
- -o /usr/local/bin/patchmon-agent.sh
+ "$PATCHMON_URL/api/v1/hosts/agent/download?arch=$ARCHITECTURE" \
+ -o /usr/local/bin/patchmon-agent
 
-chmod +x /usr/local/bin/patchmon-agent.sh
+chmod +x /usr/local/bin/patchmon-agent
 
-# Get the agent version from the downloaded script
-AGENT_VERSION=$(grep '^AGENT_VERSION=' /usr/local/bin/patchmon-agent.sh | cut -d'"' -f2 2>/dev/null || echo "Unknown")
+# Get the agent version from the binary
+AGENT_VERSION=$(/usr/local/bin/patchmon-agent version 2>/dev/null || echo "Unknown")
 info "📋 Agent version: $AGENT_VERSION"
 
+# Handle existing log files and create log directory
+info "📁 Setting up log directory..."
+
+# Create log directory if it doesn't exist
+mkdir -p /etc/patchmon/logs
+
 # Handle existing log files
-if [[ -f "/var/log/patchmon-agent.log" ]]; then
- warning "⚠️ Existing log file found at /var/log/patchmon-agent.log"
+if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
+ warning "⚠️ Existing log file found at /etc/patchmon/logs/patchmon-agent.log"
 warning "⚠️ Rotating log file for fresh start"
 
 # Rotate the log file
- mv /var/log/patchmon-agent.log /var/log/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)
- info "📋 Log file rotated to: /var/log/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)"
+ mv /etc/patchmon/logs/patchmon-agent.log /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)
+ info "📋 Log file rotated to: /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)"
 fi
 
 # Step 4: Test the configuration
@@ -386,19 +453,76 @@ if [[ "$http_code" == "200" ]]; then
 fi
 
 info "🧪 Testing API credentials and connectivity..."
-if /usr/local/bin/patchmon-agent.sh test; then
+if /usr/local/bin/patchmon-agent ping; then
 success "✅ TEST: API credentials are valid and server is reachable"
 else
 error "❌ Failed to validate API credentials or reach server"
 fi
 
-# Step 5: Send initial data and setup automated updates
+# Step 5: Send initial data and setup systemd service
 info "📊 Sending initial package data to server..."
-if /usr/local/bin/patchmon-agent.sh update; then
+if /usr/local/bin/patchmon-agent report; then
 success "✅ UPDATE: Initial package data sent successfully"
- info "✅ Automated updates configured by agent"
 else
- warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent.sh update"
+ warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent report"
+fi
+
+# Step 6: Setup systemd service for WebSocket connection
+info "🔧 Setting up systemd service..."
+
+# Stop and disable existing service if it exists
+if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then
+ warning "⚠️ Stopping existing PatchMon agent service..."
+ systemctl stop patchmon-agent.service
+fi
+
+if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then
+ warning "⚠️ Disabling existing PatchMon agent service..."
+ systemctl disable patchmon-agent.service
+fi
+
+# Create systemd service file
+cat > /etc/systemd/system/patchmon-agent.service << EOF
+[Unit]
+Description=PatchMon Agent Service
+After=network.target
+Wants=network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/local/bin/patchmon-agent serve
+Restart=always
+RestartSec=10
+WorkingDirectory=/etc/patchmon
+
+# Logging
+StandardOutput=journal
+StandardError=journal
+SyslogIdentifier=patchmon-agent
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# Clean up old crontab entries if they exist (from previous installations)
+if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then
+ warning "⚠️ Found old crontab entries, removing them..."
+ crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab -
+ info "📋 Removed old crontab entries"
+fi
+
+# Reload systemd and enable/start the service
+systemctl daemon-reload
+systemctl enable patchmon-agent.service
+systemctl start patchmon-agent.service
+
+# Check if service started successfully
+if systemctl is-active --quiet patchmon-agent.service; then
+ success "✅ PatchMon Agent service started successfully"
+ info "🔗 WebSocket connection established"
+else
+ warning "⚠️ Service may have failed to start. Check status with: systemctl status patchmon-agent"
 fi
 
 # Installation complete
@@ -406,14 +530,16 @@ success "🎉 PatchMon Agent installation completed successfully!"
 echo ""
 echo -e "${GREEN}📋 Installation Summary:${NC}"
 echo " • Configuration directory: /etc/patchmon"
-echo " • Agent installed: /usr/local/bin/patchmon-agent.sh"
+echo " • Agent binary installed: /usr/local/bin/patchmon-agent"
+echo " • Architecture: $ARCHITECTURE"
 echo " • Dependencies installed: jq, curl, bc"
-echo " • Automated updates configured via crontab"
+echo " • Systemd service configured and running"
 echo " • API credentials configured and tested"
-echo " • Update schedule managed by agent"
+echo " • WebSocket connection established"
+echo " • Logs directory: /etc/patchmon/logs"
 
 # Check for moved files and show them
-MOVED_FILES=$(ls /etc/patchmon/credentials.backup.* /usr/local/bin/patchmon-agent.sh.backup.* /var/log/patchmon-agent.log.old.* 2>/dev/null || true)
+MOVED_FILES=$(ls /etc/patchmon/credentials.yml.backup.* /etc/patchmon/config.yml.backup.* /usr/local/bin/patchmon-agent.backup.* /etc/patchmon/logs/patchmon-agent.log.old.* /usr/local/bin/patchmon-agent.sh.backup.* /etc/patchmon/credentials.backup.* 2>/dev/null || true)
 if [[ -n "$MOVED_FILES" ]]; then
 echo ""
 echo -e "${YELLOW}📋 Files Moved for Fresh Installation:${NC}"
@@ -426,8 +552,11 @@ fi
 
 echo ""
 echo -e "${BLUE}🔧 Management Commands:${NC}"
-echo " • Test connection: /usr/local/bin/patchmon-agent.sh test"
-echo " • Manual update: /usr/local/bin/patchmon-agent.sh update"
-echo " • Check status: /usr/local/bin/patchmon-agent.sh diagnostics"
+echo " • Test connection: /usr/local/bin/patchmon-agent ping"
+echo " • Manual report: /usr/local/bin/patchmon-agent report"
+echo " • Check status: /usr/local/bin/patchmon-agent diagnostics"
+echo " • Service status: systemctl status patchmon-agent"
+echo " • Service logs: journalctl -u patchmon-agent -f"
+echo " • Restart service: systemctl restart patchmon-agent"
 echo ""
 success "✅ Your system is now being monitored by PatchMon!"
diff --git a/backend/env.example b/backend/env.example index 700c2c2..10d9ac3 100644 --- a/backend/env.example +++ b/backend/env.example @@ -3,6 +3,12 @@ DATABASE_URL="postgresql://patchmon_user:p@tchm0n_p@55@localhost:5432/patchmon_d PM_DB_CONN_MAX_ATTEMPTS=30 PM_DB_CONN_WAIT_INTERVAL=2 +# Redis Configuration +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD=your-redis-password-here +REDIS_DB=0 + # Server Configuration PORT=3001 NODE_ENV=development diff --git a/backend/prisma/migrations/20251016202936_add_many_to_many_host_groups/migration.sql b/backend/prisma/migrations/20251016202936_add_many_to_many_host_groups/migration.sql new file mode 100644 index 0000000..810f6a0 --- /dev/null +++ b/backend/prisma/migrations/20251016202936_add_many_to_many_host_groups/migration.sql @@ -0,0 +1,43 @@ +-- CreateTable +CREATE TABLE "host_group_memberships" ( + "id" TEXT NOT NULL, + "host_id" TEXT NOT NULL, + "host_group_id" TEXT NOT NULL, + "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + + CONSTRAINT "host_group_memberships_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "host_group_memberships_host_id_host_group_id_key" ON "host_group_memberships"("host_id", "host_group_id"); + +-- CreateIndex +CREATE INDEX "host_group_memberships_host_id_idx" ON "host_group_memberships"("host_id"); + +-- CreateIndex +CREATE INDEX "host_group_memberships_host_group_id_idx" ON "host_group_memberships"("host_group_id"); + +-- Migrate existing data from hosts.host_group_id to host_group_memberships +INSERT INTO "host_group_memberships" ("id", "host_id", "host_group_id", "created_at") +SELECT + gen_random_uuid()::text as "id", + "id" as "host_id", + "host_group_id" as "host_group_id", + CURRENT_TIMESTAMP as "created_at" +FROM "hosts" +WHERE "host_group_id" IS NOT NULL; + +-- AddForeignKey +ALTER TABLE "host_group_memberships" ADD CONSTRAINT "host_group_memberships_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "host_group_memberships" ADD CONSTRAINT "host_group_memberships_host_group_id_fkey" FOREIGN KEY ("host_group_id") REFERENCES "host_groups"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- DropForeignKey +ALTER TABLE "hosts" DROP CONSTRAINT IF EXISTS "hosts_host_group_id_fkey"; + +-- DropIndex +DROP INDEX IF EXISTS "hosts_host_group_id_idx"; + +-- AlterTable +ALTER TABLE "hosts" DROP COLUMN "host_group_id"; diff --git a/backend/prisma/schema.prisma b/backend/prisma/schema.prisma index cb31167..81230ba 100644 --- a/backend/prisma/schema.prisma +++ b/backend/prisma/schema.prisma @@ -27,10 +27,23 @@ model host_groups { color String? @default("#3B82F6") created_at DateTime @default(now()) updated_at DateTime - hosts hosts[] + host_group_memberships host_group_memberships[] auto_enrollment_tokens auto_enrollment_tokens[] } +model host_group_memberships { + id String @id + host_id String + host_group_id String + created_at DateTime @default(now()) + hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade) + host_groups host_groups @relation(fields: [host_group_id], references: [id], onDelete: Cascade) + + @@unique([host_id, host_group_id]) + @@index([host_id]) + @@index([host_group_id]) +} + model host_packages { id String @id host_id String @@ -67,41 +80,40 @@ model host_repositories { } model hosts { - id String @id - machine_id String @unique - friendly_name String - ip String? - os_type String - os_version String - architecture String? 
- last_update DateTime @default(now()) - status String @default("active") - created_at DateTime @default(now()) - updated_at DateTime - api_id String @unique - api_key String @unique - host_group_id String? - agent_version String? - auto_update Boolean @default(true) - cpu_cores Int? - cpu_model String? - disk_details Json? - dns_servers Json? - gateway_ip String? - hostname String? - kernel_version String? - load_average Json? - network_interfaces Json? - ram_installed Int? - selinux_status String? - swap_size Int? - system_uptime String? - notes String? - host_packages host_packages[] - host_repositories host_repositories[] - host_groups host_groups? @relation(fields: [host_group_id], references: [id]) - update_history update_history[] - job_history job_history[] + id String @id + machine_id String @unique + friendly_name String + ip String? + os_type String + os_version String + architecture String? + last_update DateTime @default(now()) + status String @default("active") + created_at DateTime @default(now()) + updated_at DateTime + api_id String @unique + api_key String @unique + agent_version String? + auto_update Boolean @default(true) + cpu_cores Int? + cpu_model String? + disk_details Json? + dns_servers Json? + gateway_ip String? + hostname String? + kernel_version String? + load_average Json? + network_interfaces Json? + ram_installed Int? + selinux_status String? + swap_size Int? + system_uptime String? + notes String? + host_packages host_packages[] + host_repositories host_repositories[] + host_group_memberships host_group_memberships[] + update_history update_history[] + job_history job_history[] @@index([machine_id]) @@index([friendly_name]) diff --git a/backend/src/routes/automationRoutes.js b/backend/src/routes/automationRoutes.js index c5789ea..f216aff 100644 --- a/backend/src/routes/automationRoutes.js +++ b/backend/src/routes/automationRoutes.js @@ -194,6 +194,30 @@ router.post( }, ); +// Trigger manual orphaned package cleanup +router.post( + "/trigger/orphaned-package-cleanup", + authenticateToken, + async (_req, res) => { + try { + const job = await queueManager.triggerOrphanedPackageCleanup(); + res.json({ + success: true, + data: { + jobId: job.id, + message: "Orphaned package cleanup triggered successfully", + }, + }); + } catch (error) { + console.error("Error triggering orphaned package cleanup:", error); + res.status(500).json({ + success: false, + error: "Failed to trigger orphaned package cleanup", + }); + } + }, +); + // Get queue health status router.get("/health", authenticateToken, async (_req, res) => { try { @@ -249,6 +273,7 @@ router.get("/overview", authenticateToken, async (_req, res) => { queueManager.getRecentJobs(QUEUE_NAMES.GITHUB_UPDATE_CHECK, 1), queueManager.getRecentJobs(QUEUE_NAMES.SESSION_CLEANUP, 1), queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_REPO_CLEANUP, 1), + queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1), queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1), ]); @@ -257,17 +282,20 @@ router.get("/overview", authenticateToken, async (_req, res) => { scheduledTasks: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].delayed + stats[QUEUE_NAMES.SESSION_CLEANUP].delayed + - stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed, + stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed + + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed, runningTasks: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active + stats[QUEUE_NAMES.SESSION_CLEANUP].active + - stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active, + 
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active + + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active, failedTasks: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed + stats[QUEUE_NAMES.SESSION_CLEANUP].failed + - stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed, + stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed + + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed, totalAutomations: Object.values(stats).reduce((sum, queueStats) => { return ( @@ -331,10 +359,10 @@ router.get("/overview", authenticateToken, async (_req, res) => { stats: stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP], }, { - name: "Collect Host Statistics", - queue: QUEUE_NAMES.AGENT_COMMANDS, - description: "Collects package statistics from connected agents only", - schedule: `Every ${settings.update_interval} minutes (Agent-driven)`, + name: "Orphaned Package Cleanup", + queue: QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, + description: "Removes packages with no associated hosts", + schedule: "Daily at 3 AM", lastRun: recentJobs[3][0]?.finishedOn ? new Date(recentJobs[3][0].finishedOn).toLocaleString() : "Never", @@ -344,6 +372,22 @@ router.get("/overview", authenticateToken, async (_req, res) => { : recentJobs[3][0] ? "Success" : "Never run", + stats: stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP], + }, + { + name: "Collect Host Statistics", + queue: QUEUE_NAMES.AGENT_COMMANDS, + description: "Collects package statistics from connected agents only", + schedule: `Every ${settings.update_interval} minutes (Agent-driven)`, + lastRun: recentJobs[4][0]?.finishedOn + ? new Date(recentJobs[4][0].finishedOn).toLocaleString() + : "Never", + lastRunTimestamp: recentJobs[4][0]?.finishedOn || 0, + status: recentJobs[4][0]?.failedReason + ? "Failed" + : recentJobs[4][0] + ? "Success" + : "Never run", stats: stats[QUEUE_NAMES.AGENT_COMMANDS], }, ].sort((a, b) => { diff --git a/backend/src/routes/dashboardRoutes.js b/backend/src/routes/dashboardRoutes.js index 73da744..54d2322 100644 --- a/backend/src/routes/dashboardRoutes.js +++ b/backend/src/routes/dashboardRoutes.js @@ -202,11 +202,15 @@ router.get("/hosts", authenticateToken, requireViewHosts, async (_req, res) => { auto_update: true, notes: true, api_id: true, - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, _count: { @@ -356,11 +360,15 @@ router.get( prisma.hosts.findUnique({ where: { id: hostId }, include: { - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, host_packages: { @@ -558,22 +566,34 @@ router.get( packages_count: true, security_count: true, total_packages: true, + host_id: true, + status: true, }, orderBy: { timestamp: "asc", }, }); - // Process data to show actual values (no averaging) + // Enhanced data validation and processing const processedData = trendsData - .filter((record) => record.total_packages !== null) // Only include records with valid data + .filter((record) => { + // Enhanced validation + return ( + record.total_packages !== null && + record.total_packages >= 0 && + record.packages_count >= 0 && + record.security_count >= 0 && + record.security_count <= record.packages_count && // Security can't exceed outdated + record.status === "success" + ); // Only include successful reports + }) .map((record) => { const date = new Date(record.timestamp); let timeKey; if (daysInt <= 1) { - // 
For hourly view, use exact timestamp - timeKey = date.toISOString().substring(0, 16); // YYYY-MM-DDTHH:MM + // For hourly view, group by hour only (not minutes) + timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH } else { // For daily view, group by day timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD @@ -584,64 +604,342 @@ router.get( total_packages: record.total_packages, packages_count: record.packages_count || 0, security_count: record.security_count || 0, + host_id: record.host_id, + timestamp: record.timestamp, }; - }) - .sort((a, b) => a.timeKey.localeCompare(b.timeKey)); // Sort by time + }); - // Get hosts list for dropdown (always fetch for dropdown functionality) + // Determine if we need aggregation based on host filter + const needsAggregation = + !hostId || hostId === "all" || hostId === "undefined"; + + let aggregatedArray; + + if (needsAggregation) { + // For "All Hosts" mode, we need to calculate the actual total packages differently + // Instead of aggregating historical data (which is per-host), we'll use the current total + // and show that as a flat line, since total packages don't change much over time + + // Get the current total packages count (unique packages across all hosts) + const currentTotalPackages = await prisma.packages.count({ + where: { + host_packages: { + some: {}, // At least one host has this package + }, + }, + }); + + // Aggregate data by timeKey when looking at "All Hosts" or no specific host + const aggregatedData = processedData.reduce((acc, item) => { + if (!acc[item.timeKey]) { + acc[item.timeKey] = { + timeKey: item.timeKey, + total_packages: currentTotalPackages, // Use current total packages + packages_count: 0, + security_count: 0, + record_count: 0, + host_ids: new Set(), + min_timestamp: item.timestamp, + max_timestamp: item.timestamp, + }; + } + + // For outdated and security packages: SUM (these represent counts across hosts) + acc[item.timeKey].packages_count += item.packages_count; + acc[item.timeKey].security_count += item.security_count; + + acc[item.timeKey].record_count += 1; + acc[item.timeKey].host_ids.add(item.host_id); + + // Track timestamp range + if (item.timestamp < acc[item.timeKey].min_timestamp) { + acc[item.timeKey].min_timestamp = item.timestamp; + } + if (item.timestamp > acc[item.timeKey].max_timestamp) { + acc[item.timeKey].max_timestamp = item.timestamp; + } + + return acc; + }, {}); + + // Convert to array and add metadata + aggregatedArray = Object.values(aggregatedData) + .map((item) => ({ + ...item, + host_count: item.host_ids.size, + host_ids: Array.from(item.host_ids), + })) + .sort((a, b) => a.timeKey.localeCompare(b.timeKey)); + } else { + // For specific host, show individual data points without aggregation + // But still group by timeKey to handle multiple reports from same host in same time period + const hostAggregatedData = processedData.reduce((acc, item) => { + if (!acc[item.timeKey]) { + acc[item.timeKey] = { + timeKey: item.timeKey, + total_packages: 0, + packages_count: 0, + security_count: 0, + record_count: 0, + host_ids: new Set([item.host_id]), + min_timestamp: item.timestamp, + max_timestamp: item.timestamp, + }; + } + + // For same host, take the latest values (not sum) + // This handles cases where a host reports multiple times in the same time period + if (item.timestamp > acc[item.timeKey].max_timestamp) { + acc[item.timeKey].total_packages = item.total_packages; + acc[item.timeKey].packages_count = item.packages_count; + acc[item.timeKey].security_count = 
item.security_count; + acc[item.timeKey].max_timestamp = item.timestamp; + } + + acc[item.timeKey].record_count += 1; + + return acc; + }, {}); + + // Convert to array + aggregatedArray = Object.values(hostAggregatedData) + .map((item) => ({ + ...item, + host_count: item.host_ids.size, + host_ids: Array.from(item.host_ids), + })) + .sort((a, b) => a.timeKey.localeCompare(b.timeKey)); + } + + // Handle sparse data by filling missing time periods + const fillMissingPeriods = (data, daysInt) => { + const filledData = []; + const startDate = new Date(); + startDate.setDate(startDate.getDate() - daysInt); + + const dataMap = new Map(data.map((item) => [item.timeKey, item])); + + const endDate = new Date(); + const currentDate = new Date(startDate); + + // Find the last known values for interpolation + let lastKnownValues = null; + if (data.length > 0) { + lastKnownValues = { + total_packages: data[0].total_packages, + packages_count: data[0].packages_count, + security_count: data[0].security_count, + }; + } + + while (currentDate <= endDate) { + let timeKey; + if (daysInt <= 1) { + timeKey = currentDate.toISOString().substring(0, 13); // Hourly + currentDate.setHours(currentDate.getHours() + 1); + } else { + timeKey = currentDate.toISOString().split("T")[0]; // Daily + currentDate.setDate(currentDate.getDate() + 1); + } + + if (dataMap.has(timeKey)) { + const item = dataMap.get(timeKey); + filledData.push(item); + // Update last known values + lastKnownValues = { + total_packages: item.total_packages, + packages_count: item.packages_count, + security_count: item.security_count, + }; + } else { + // For missing periods, use the last known values (interpolation) + // This creates a continuous line instead of gaps + filledData.push({ + timeKey, + total_packages: lastKnownValues?.total_packages || 0, + packages_count: lastKnownValues?.packages_count || 0, + security_count: lastKnownValues?.security_count || 0, + record_count: 0, + host_count: 0, + host_ids: [], + min_timestamp: null, + max_timestamp: null, + isInterpolated: true, // Mark as interpolated for debugging + }); + } + } + + return filledData; + }; + + const finalProcessedData = fillMissingPeriods(aggregatedArray, daysInt); + + // Get hosts list for dropdown const hostsList = await prisma.hosts.findMany({ select: { id: true, friendly_name: true, hostname: true, + last_update: true, + status: true, }, orderBy: { friendly_name: "asc", }, }); + // Get current package state for offline fallback + let currentPackageState = null; + if (hostId && hostId !== "all" && hostId !== "undefined") { + // Get current package counts for specific host + const currentState = await prisma.host_packages.aggregate({ + where: { + host_id: hostId, + }, + _count: { + id: true, + }, + }); + + // Get counts for boolean fields separately + const outdatedCount = await prisma.host_packages.count({ + where: { + host_id: hostId, + needs_update: true, + }, + }); + + const securityCount = await prisma.host_packages.count({ + where: { + host_id: hostId, + is_security_update: true, + }, + }); + + currentPackageState = { + total_packages: currentState._count.id, + packages_count: outdatedCount, + security_count: securityCount, + }; + } else { + // Get current package counts for all hosts + // Total packages = count of unique packages installed on at least one host + const totalPackagesCount = await prisma.packages.count({ + where: { + host_packages: { + some: {}, // At least one host has this package + }, + }, + }); + + // Get counts for boolean fields separately + const 
outdatedCount = await prisma.host_packages.count({ + where: { + needs_update: true, + }, + }); + + const securityCount = await prisma.host_packages.count({ + where: { + is_security_update: true, + }, + }); + + currentPackageState = { + total_packages: totalPackagesCount, + packages_count: outdatedCount, + security_count: securityCount, + }; + } + // Format data for chart const chartData = { labels: [], datasets: [ { - label: "Total Packages", + label: needsAggregation + ? "Total Packages (All Hosts)" + : "Total Packages", data: [], borderColor: "#3B82F6", // Blue backgroundColor: "rgba(59, 130, 246, 0.1)", tension: 0.4, hidden: true, // Hidden by default + spanGaps: true, // Connect lines across missing data + pointRadius: 3, + pointHoverRadius: 5, }, { - label: "Outdated Packages", + label: needsAggregation + ? "Total Outdated Packages" + : "Outdated Packages", data: [], borderColor: "#F59E0B", // Orange backgroundColor: "rgba(245, 158, 11, 0.1)", tension: 0.4, + spanGaps: true, // Connect lines across missing data + pointRadius: 3, + pointHoverRadius: 5, }, { - label: "Security Packages", + label: needsAggregation + ? "Total Security Packages" + : "Security Packages", data: [], borderColor: "#EF4444", // Red backgroundColor: "rgba(239, 68, 68, 0.1)", tension: 0.4, + spanGaps: true, // Connect lines across missing data + pointRadius: 3, + pointHoverRadius: 5, }, ], }; // Process aggregated data - processedData.forEach((item) => { + finalProcessedData.forEach((item) => { chartData.labels.push(item.timeKey); chartData.datasets[0].data.push(item.total_packages); chartData.datasets[1].data.push(item.packages_count); chartData.datasets[2].data.push(item.security_count); }); + // Calculate data quality metrics + const dataQuality = { + totalRecords: trendsData.length, + validRecords: processedData.length, + aggregatedPoints: aggregatedArray.length, + filledPoints: finalProcessedData.length, + recordsWithNullTotal: trendsData.filter( + (r) => r.total_packages === null, + ).length, + recordsWithInvalidData: trendsData.length - processedData.length, + successfulReports: trendsData.filter((r) => r.status === "success") + .length, + failedReports: trendsData.filter((r) => r.status === "error").length, + }; + res.json({ chartData, hosts: hostsList, period: daysInt, hostId: hostId || "all", + currentPackageState, + dataQuality, + aggregationInfo: { + hasData: aggregatedArray.length > 0, + hasGaps: finalProcessedData.some((item) => item.record_count === 0), + lastDataPoint: + aggregatedArray.length > 0 + ? aggregatedArray[aggregatedArray.length - 1] + : null, + aggregationMode: needsAggregation + ? "sum_across_hosts" + : "individual_host_data", + explanation: needsAggregation + ? "Data is summed across all hosts for each time period" + : "Data shows individual host values without cross-host aggregation", + }, }); } catch (error) { console.error("Error fetching package trends:", error); @@ -650,4 +948,348 @@ router.get( }, ); +// Diagnostic endpoint to investigate package spikes +router.get( + "/package-spike-analysis", + authenticateToken, + requireViewHosts, + async (req, res) => { + try { + const { date, time, hours = 2 } = req.query; + + if (!date || !time) { + return res.status(400).json({ + error: + "Date and time parameters are required. 
Format: date=2025-10-17&time=18:00", + }); + } + + // Parse the specific date and time + const targetDateTime = new Date(`${date}T${time}:00`); + const startTime = new Date(targetDateTime); + startTime.setHours(startTime.getHours() - parseInt(hours, 10)); + const endTime = new Date(targetDateTime); + endTime.setHours(endTime.getHours() + parseInt(hours, 10)); + + console.log( + `Analyzing package spike around ${targetDateTime.toISOString()}`, + ); + console.log( + `Time range: ${startTime.toISOString()} to ${endTime.toISOString()}`, + ); + + // Get all update history records in the time window + const spikeData = await prisma.update_history.findMany({ + where: { + timestamp: { + gte: startTime, + lte: endTime, + }, + }, + select: { + id: true, + host_id: true, + timestamp: true, + packages_count: true, + security_count: true, + total_packages: true, + status: true, + error_message: true, + execution_time: true, + payload_size_kb: true, + hosts: { + select: { + friendly_name: true, + hostname: true, + os_type: true, + os_version: true, + }, + }, + }, + orderBy: { + timestamp: "asc", + }, + }); + + // Analyze the data + const analysis = { + timeWindow: { + start: startTime.toISOString(), + end: endTime.toISOString(), + target: targetDateTime.toISOString(), + }, + totalRecords: spikeData.length, + successfulReports: spikeData.filter((r) => r.status === "success") + .length, + failedReports: spikeData.filter((r) => r.status === "error").length, + uniqueHosts: [...new Set(spikeData.map((r) => r.host_id))].length, + hosts: {}, + timeline: [], + summary: { + maxPackagesCount: 0, + maxSecurityCount: 0, + maxTotalPackages: 0, + avgPackagesCount: 0, + avgSecurityCount: 0, + avgTotalPackages: 0, + }, + }; + + // Group by host and analyze each host's behavior + spikeData.forEach((record) => { + const hostId = record.host_id; + if (!analysis.hosts[hostId]) { + analysis.hosts[hostId] = { + hostInfo: record.hosts, + records: [], + summary: { + totalReports: 0, + successfulReports: 0, + failedReports: 0, + maxPackagesCount: 0, + maxSecurityCount: 0, + maxTotalPackages: 0, + avgPackagesCount: 0, + avgSecurityCount: 0, + avgTotalPackages: 0, + }, + }; + } + + analysis.hosts[hostId].records.push({ + timestamp: record.timestamp, + packages_count: record.packages_count, + security_count: record.security_count, + total_packages: record.total_packages, + status: record.status, + error_message: record.error_message, + execution_time: record.execution_time, + payload_size_kb: record.payload_size_kb, + }); + + analysis.hosts[hostId].summary.totalReports++; + if (record.status === "success") { + analysis.hosts[hostId].summary.successfulReports++; + analysis.hosts[hostId].summary.maxPackagesCount = Math.max( + analysis.hosts[hostId].summary.maxPackagesCount, + record.packages_count, + ); + analysis.hosts[hostId].summary.maxSecurityCount = Math.max( + analysis.hosts[hostId].summary.maxSecurityCount, + record.security_count, + ); + analysis.hosts[hostId].summary.maxTotalPackages = Math.max( + analysis.hosts[hostId].summary.maxTotalPackages, + record.total_packages || 0, + ); + } else { + analysis.hosts[hostId].summary.failedReports++; + } + }); + + // Calculate averages for each host + Object.keys(analysis.hosts).forEach((hostId) => { + const host = analysis.hosts[hostId]; + const successfulRecords = host.records.filter( + (r) => r.status === "success", + ); + + if (successfulRecords.length > 0) { + host.summary.avgPackagesCount = Math.round( + successfulRecords.reduce((sum, r) => sum + r.packages_count, 0) / + 
successfulRecords.length, + ); + host.summary.avgSecurityCount = Math.round( + successfulRecords.reduce((sum, r) => sum + r.security_count, 0) / + successfulRecords.length, + ); + host.summary.avgTotalPackages = Math.round( + successfulRecords.reduce( + (sum, r) => sum + (r.total_packages || 0), + 0, + ) / successfulRecords.length, + ); + } + }); + + // Create timeline with hourly/daily aggregation + const timelineMap = new Map(); + spikeData.forEach((record) => { + const timeKey = record.timestamp.toISOString().substring(0, 13); // Hourly + if (!timelineMap.has(timeKey)) { + timelineMap.set(timeKey, { + timestamp: timeKey, + totalReports: 0, + successfulReports: 0, + failedReports: 0, + totalPackagesCount: 0, + totalSecurityCount: 0, + totalTotalPackages: 0, + uniqueHosts: new Set(), + }); + } + + const timelineEntry = timelineMap.get(timeKey); + timelineEntry.totalReports++; + timelineEntry.uniqueHosts.add(record.host_id); + + if (record.status === "success") { + timelineEntry.successfulReports++; + timelineEntry.totalPackagesCount += record.packages_count; + timelineEntry.totalSecurityCount += record.security_count; + timelineEntry.totalTotalPackages += record.total_packages || 0; + } else { + timelineEntry.failedReports++; + } + }); + + // Convert timeline map to array + analysis.timeline = Array.from(timelineMap.values()) + .map((entry) => ({ + ...entry, + uniqueHosts: entry.uniqueHosts.size, + })) + .sort((a, b) => a.timestamp.localeCompare(b.timestamp)); + + // Calculate overall summary + const successfulRecords = spikeData.filter((r) => r.status === "success"); + if (successfulRecords.length > 0) { + analysis.summary.maxPackagesCount = Math.max( + ...successfulRecords.map((r) => r.packages_count), + ); + analysis.summary.maxSecurityCount = Math.max( + ...successfulRecords.map((r) => r.security_count), + ); + analysis.summary.maxTotalPackages = Math.max( + ...successfulRecords.map((r) => r.total_packages || 0), + ); + analysis.summary.avgPackagesCount = Math.round( + successfulRecords.reduce((sum, r) => sum + r.packages_count, 0) / + successfulRecords.length, + ); + analysis.summary.avgSecurityCount = Math.round( + successfulRecords.reduce((sum, r) => sum + r.security_count, 0) / + successfulRecords.length, + ); + analysis.summary.avgTotalPackages = Math.round( + successfulRecords.reduce( + (sum, r) => sum + (r.total_packages || 0), + 0, + ) / successfulRecords.length, + ); + } + + // Identify potential causes of the spike + const potentialCauses = []; + + // Check for hosts with unusually high package counts + Object.keys(analysis.hosts).forEach((hostId) => { + const host = analysis.hosts[hostId]; + if ( + host.summary.maxPackagesCount > + analysis.summary.avgPackagesCount * 2 + ) { + potentialCauses.push({ + type: "high_package_count", + hostId, + hostName: host.hostInfo.friendly_name || host.hostInfo.hostname, + value: host.summary.maxPackagesCount, + avg: analysis.summary.avgPackagesCount, + }); + } + }); + + // Check for multiple hosts reporting at the same time (this explains the 500 vs 59 discrepancy) + const concurrentReports = analysis.timeline.filter( + (entry) => entry.uniqueHosts > 1, + ); + if (concurrentReports.length > 0) { + potentialCauses.push({ + type: "concurrent_reports", + description: + "Multiple hosts reported simultaneously - this explains why chart shows higher numbers than individual host reports", + count: concurrentReports.length, + details: concurrentReports.map((entry) => ({ + timestamp: entry.timestamp, + totalPackagesCount: entry.totalPackagesCount, 
+ uniqueHosts: entry.uniqueHosts, + avgPerHost: Math.round( + entry.totalPackagesCount / entry.uniqueHosts, + ), + })), + explanation: + "The chart sums package counts across all hosts. If multiple hosts report at the same time, the chart shows the total sum, not individual host counts.", + }); + } + + // Check for failed reports that might indicate system issues + if (analysis.failedReports > 0) { + potentialCauses.push({ + type: "failed_reports", + count: analysis.failedReports, + percentage: Math.round( + (analysis.failedReports / analysis.totalRecords) * 100, + ), + }); + } + + // Add aggregation explanation + const aggregationExplanation = { + type: "aggregation_explanation", + description: "Chart Aggregation Logic", + details: { + howItWorks: + "The package trends chart sums package counts across all hosts for each time period", + individualHosts: + "Each host reports its own package count (e.g., 59 packages)", + chartDisplay: + "Chart shows the sum of all hosts' package counts (e.g., 59 + other hosts = 500)", + timeGrouping: + "Multiple hosts reporting in the same hour/day are aggregated together", + }, + example: { + host1: "Host A reports 59 outdated packages", + host2: "Host B reports 120 outdated packages", + host3: "Host C reports 321 outdated packages", + chartShows: "Chart displays 500 total packages (59+120+321)", + }, + }; + potentialCauses.push(aggregationExplanation); + + // Add specific host breakdown if a host ID is provided + let specificHostAnalysis = null; + if (req.query.hostId) { + const hostId = req.query.hostId; + const hostData = analysis.hosts[hostId]; + if (hostData) { + specificHostAnalysis = { + hostId, + hostInfo: hostData.hostInfo, + summary: hostData.summary, + records: hostData.records, + explanation: `This host reported ${hostData.summary.maxPackagesCount} outdated packages, but the chart shows ${analysis.summary.maxPackagesCount} because it sums across all hosts that reported at the same time.`, + }; + } + } + + res.json({ + analysis, + potentialCauses, + specificHostAnalysis, + recommendations: [ + "Check if any hosts had major package updates around this time", + "Verify if any new hosts were added to the system", + "Check for system maintenance or updates that might have triggered package checks", + "Review any automation or scheduled tasks that run around 6pm", + "Check if any repositories were updated or new packages were released", + "Remember: Chart shows SUM of all hosts' package counts, not individual host counts", + ], + }); + } catch (error) { + console.error("Error analyzing package spike:", error); + res.status(500).json({ error: "Failed to analyze package spike" }); + } + }, +); + module.exports = router; diff --git a/backend/src/routes/hostGroupRoutes.js b/backend/src/routes/hostGroupRoutes.js index 42300be..5b44bbb 100644 --- a/backend/src/routes/hostGroupRoutes.js +++ b/backend/src/routes/hostGroupRoutes.js @@ -15,7 +15,7 @@ router.get("/", authenticateToken, async (_req, res) => { include: { _count: { select: { - hosts: true, + host_group_memberships: true, }, }, }, @@ -39,16 +39,20 @@ router.get("/:id", authenticateToken, async (req, res) => { const hostGroup = await prisma.host_groups.findUnique({ where: { id }, include: { - hosts: { - select: { - id: true, - friendly_name: true, - hostname: true, - ip: true, - os_type: true, - os_version: true, - status: true, - last_update: true, + host_group_memberships: { + include: { + hosts: { + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + os_type: true, + 
os_version: true, + status: true, + last_update: true, + }, + }, }, }, }, @@ -195,7 +199,7 @@ router.delete( include: { _count: { select: { - hosts: true, + host_group_memberships: true, }, }, }, @@ -205,11 +209,10 @@ router.delete( return res.status(404).json({ error: "Host group not found" }); } - // If host group has hosts, ungroup them first - if (existingGroup._count.hosts > 0) { - await prisma.hosts.updateMany({ + // If host group has memberships, remove them first + if (existingGroup._count.host_group_memberships > 0) { + await prisma.host_group_memberships.deleteMany({ where: { host_group_id: id }, - data: { host_group_id: null }, }); } @@ -231,7 +234,13 @@ router.get("/:id/hosts", authenticateToken, async (req, res) => { const { id } = req.params; const hosts = await prisma.hosts.findMany({ - where: { host_group_id: id }, + where: { + host_group_memberships: { + some: { + host_group_id: id, + }, + }, + }, select: { id: true, friendly_name: true, diff --git a/backend/src/routes/hostRoutes.js b/backend/src/routes/hostRoutes.js index 9a13563..bbec5f3 100644 --- a/backend/src/routes/hostRoutes.js +++ b/backend/src/routes/hostRoutes.js @@ -14,7 +14,7 @@ const { const router = express.Router(); const prisma = new PrismaClient(); -// Secure endpoint to download the agent script (requires API authentication) +// Secure endpoint to download the agent binary (requires API authentication) router.get("/agent/download", async (req, res) => { try { // Verify API credentials @@ -34,46 +34,50 @@ router.get("/agent/download", async (req, res) => { return res.status(401).json({ error: "Invalid API credentials" }); } - // Serve agent script directly from file system + // Get architecture parameter (default to amd64) + const architecture = req.query.arch || "amd64"; + + // Validate architecture + const validArchitectures = ["amd64", "386", "arm64"]; + if (!validArchitectures.includes(architecture)) { + return res.status(400).json({ + error: "Invalid architecture. 
Must be one of: amd64, 386, arm64", + }); + } + + // Serve agent binary directly from file system const fs = require("node:fs"); const path = require("node:path"); - const agentPath = path.join(__dirname, "../../../agents/patchmon-agent.sh"); + const binaryName = `patchmon-agent-linux-${architecture}`; + const binaryPath = path.join(__dirname, "../../../agents", binaryName); - if (!fs.existsSync(agentPath)) { - return res.status(404).json({ error: "Agent script not found" }); + if (!fs.existsSync(binaryPath)) { + return res.status(404).json({ + error: `Agent binary not found for architecture: ${architecture}`, + }); } - // Read file and convert line endings - let scriptContent = fs - .readFileSync(agentPath, "utf8") - .replace(/\r\n/g, "\n") - .replace(/\r/g, "\n"); - - // Determine curl flags dynamically from settings for consistency - let curlFlags = "-s"; - try { - const settings = await prisma.settings.findFirst(); - if (settings && settings.ignore_ssl_self_signed === true) { - curlFlags = "-sk"; - } - } catch (_) {} - - // Inject the curl flags into the script - scriptContent = scriptContent.replace( - 'CURL_FLAGS=""', - `CURL_FLAGS="${curlFlags}"`, - ); - - res.setHeader("Content-Type", "application/x-shellscript"); + // Set appropriate headers for binary download + res.setHeader("Content-Type", "application/octet-stream"); res.setHeader( "Content-Disposition", - 'attachment; filename="patchmon-agent.sh"', + `attachment; filename="${binaryName}"`, ); - res.send(scriptContent); + + // Stream the binary file + const fileStream = fs.createReadStream(binaryPath); + fileStream.pipe(res); + + fileStream.on("error", (error) => { + console.error("Binary stream error:", error); + if (!res.headersSent) { + res.status(500).json({ error: "Failed to stream agent binary" }); + } + }); } catch (error) { console.error("Agent download error:", error); - res.status(500).json({ error: "Failed to download agent script" }); + res.status(500).json({ error: "Failed to serve agent binary" }); } }); @@ -158,7 +162,14 @@ router.post( body("friendly_name") .isLength({ min: 1 }) .withMessage("Friendly name is required"), - body("hostGroupId").optional(), + body("hostGroupIds") + .optional() + .isArray() + .withMessage("Host group IDs must be an array"), + body("hostGroupIds.*") + .optional() + .isUUID() + .withMessage("Each host group ID must be a valid UUID"), ], async (req, res) => { try { @@ -167,19 +178,21 @@ router.post( return res.status(400).json({ errors: errors.array() }); } - const { friendly_name, hostGroupId } = req.body; + const { friendly_name, hostGroupIds } = req.body; // Generate unique API credentials for this host const { apiId, apiKey } = generateApiCredentials(); - // If hostGroupId is provided, verify the group exists - if (hostGroupId) { - const hostGroup = await prisma.host_groups.findUnique({ - where: { id: hostGroupId }, + // If hostGroupIds is provided, verify all groups exist + if (hostGroupIds && hostGroupIds.length > 0) { + const hostGroups = await prisma.host_groups.findMany({ + where: { id: { in: hostGroupIds } }, }); - if (!hostGroup) { - return res.status(400).json({ error: "Host group not found" }); + if (hostGroups.length !== hostGroupIds.length) { + return res + .status(400) + .json({ error: "One or more host groups not found" }); } } @@ -195,16 +208,31 @@ router.post( architecture: null, // Will be updated when agent connects api_id: apiId, api_key: apiKey, - host_group_id: hostGroupId || null, status: "pending", // Will change to 'active' when agent connects updated_at: 
new Date(), + // Create host group memberships if hostGroupIds are provided + host_group_memberships: + hostGroupIds && hostGroupIds.length > 0 + ? { + create: hostGroupIds.map((groupId) => ({ + id: uuidv4(), + host_groups: { + connect: { id: groupId }, + }, + })), + } + : undefined, }, include: { - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, }, @@ -216,7 +244,10 @@ router.post( friendlyName: host.friendly_name, apiId: host.api_id, apiKey: host.api_key, - hostGroup: host.host_groups, + hostGroups: + host.host_group_memberships?.map( + (membership) => membership.host_groups, + ) || [], instructions: "Use these credentials in your patchmon agent configuration. System information will be automatically detected when the agent connects.", }); @@ -732,18 +763,96 @@ router.post( }, ); -// Admin endpoint to bulk update host groups +// TODO: Admin endpoint to bulk update host groups - needs to be rewritten for many-to-many relationship +// router.put( +// "/bulk/group", +// authenticateToken, +// requireManageHosts, +// [ +// body("hostIds").isArray().withMessage("Host IDs must be an array"), +// body("hostIds.*") +// .isLength({ min: 1 }) +// .withMessage("Each host ID must be provided"), +// body("hostGroupId").optional(), +// ], +// async (req, res) => { +// try { +// const errors = validationResult(req); +// if (!errors.isEmpty()) { +// return res.status(400).json({ errors: errors.array() }); +// } + +// const { hostIds, hostGroupId } = req.body; + +// // If hostGroupId is provided, verify the group exists +// if (hostGroupId) { +// const hostGroup = await prisma.host_groups.findUnique({ +// where: { id: hostGroupId }, +// }); + +// if (!hostGroup) { +// return res.status(400).json({ error: "Host group not found" }); +// } +// } + +// // Check if all hosts exist +// const existingHosts = await prisma.hosts.findMany({ +// where: { id: { in: hostIds } }, +// select: { id: true, friendly_name: true }, +// }); + +// if (existingHosts.length !== hostIds.length) { +// const foundIds = existingHosts.map((h) => h.id); +// const missingIds = hostIds.filter((id) => !foundIds.includes(id)); +// return res.status(400).json({ +// error: "Some hosts not found", +// missingHostIds: missingIds, +// }); +// } + +// // Bulk update host groups +// const updateResult = await prisma.hosts.updateMany({ +// where: { id: { in: hostIds } }, +// data: { +// host_group_id: hostGroupId || null, +// updated_at: new Date(), +// }, +// }); + +// // Get updated hosts with group information +// const updatedHosts = await prisma.hosts.findMany({ +// where: { id: { in: hostIds } }, +// select: { +// id: true, +// friendly_name: true, +// host_groups: { +// select: { +// id: true, +// name: true, +// color: true, +// }, +// }, +// }, +// }); + +// res.json({ +// message: `Successfully updated ${updateResult.count} host${updateResult.count !== 1 ? 
"s" : ""}`, +// updatedCount: updateResult.count, +// hosts: updatedHosts, +// }); +// } catch (error) { +// console.error("Bulk host group update error:", error); +// res.status(500).json({ error: "Failed to update host groups" }); +// } +// }, +// ); + +// Admin endpoint to update host groups (many-to-many) router.put( - "/bulk/group", + "/:hostId/groups", authenticateToken, requireManageHosts, - [ - body("hostIds").isArray().withMessage("Host IDs must be an array"), - body("hostIds.*") - .isLength({ min: 1 }) - .withMessage("Each host ID must be provided"), - body("hostGroupId").optional(), - ], + [body("groupIds").isArray().optional()], async (req, res) => { try { const errors = validationResult(req); @@ -751,72 +860,83 @@ router.put( return res.status(400).json({ errors: errors.array() }); } - const { hostIds, hostGroupId } = req.body; + const { hostId } = req.params; + const { groupIds = [] } = req.body; - // If hostGroupId is provided, verify the group exists - if (hostGroupId) { - const hostGroup = await prisma.host_groups.findUnique({ - where: { id: hostGroupId }, + // Check if host exists + const host = await prisma.hosts.findUnique({ + where: { id: hostId }, + }); + + if (!host) { + return res.status(404).json({ error: "Host not found" }); + } + + // Verify all groups exist + if (groupIds.length > 0) { + const existingGroups = await prisma.host_groups.findMany({ + where: { id: { in: groupIds } }, + select: { id: true }, }); - if (!hostGroup) { - return res.status(400).json({ error: "Host group not found" }); + if (existingGroups.length !== groupIds.length) { + return res.status(400).json({ + error: "One or more host groups not found", + provided: groupIds, + found: existingGroups.map((g) => g.id), + }); } } - // Check if all hosts exist - const existingHosts = await prisma.hosts.findMany({ - where: { id: { in: hostIds } }, - select: { id: true, friendly_name: true }, - }); - - if (existingHosts.length !== hostIds.length) { - const foundIds = existingHosts.map((h) => h.id); - const missingIds = hostIds.filter((id) => !foundIds.includes(id)); - return res.status(400).json({ - error: "Some hosts not found", - missingHostIds: missingIds, + // Use transaction to update group memberships + const updatedHost = await prisma.$transaction(async (tx) => { + // Remove existing memberships + await tx.host_group_memberships.deleteMany({ + where: { host_id: hostId }, }); - } - // Bulk update host groups - const updateResult = await prisma.hosts.updateMany({ - where: { id: { in: hostIds } }, - data: { - host_group_id: hostGroupId || null, - updated_at: new Date(), - }, - }); + // Add new memberships + if (groupIds.length > 0) { + await tx.host_group_memberships.createMany({ + data: groupIds.map((groupId) => ({ + id: crypto.randomUUID(), + host_id: hostId, + host_group_id: groupId, + })), + }); + } - // Get updated hosts with group information - const updatedHosts = await prisma.hosts.findMany({ - where: { id: { in: hostIds } }, - select: { - id: true, - friendly_name: true, - host_groups: { - select: { - id: true, - name: true, - color: true, + // Return updated host with groups + return await tx.hosts.findUnique({ + where: { id: hostId }, + include: { + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, + }, }, }, - }, + }); }); res.json({ - message: `Successfully updated ${updateResult.count} host${updateResult.count !== 1 ? 
"s" : ""}`, - updatedCount: updateResult.count, - hosts: updatedHosts, + message: "Host groups updated successfully", + host: updatedHost, }); } catch (error) { - console.error("Bulk host group update error:", error); + console.error("Host groups update error:", error); res.status(500).json({ error: "Failed to update host groups" }); } }, ); -// Admin endpoint to update host group +// Legacy endpoint to update single host group (for backward compatibility) router.put( "/:hostId/group", authenticateToken, @@ -832,6 +952,9 @@ router.put( const { hostId } = req.params; const { hostGroupId } = req.body; + // Convert single group to array and use the new endpoint logic + const _groupIds = hostGroupId ? [hostGroupId] : []; + // Check if host exists const host = await prisma.hosts.findUnique({ where: { id: hostId }, @@ -841,7 +964,7 @@ router.put( return res.status(404).json({ error: "Host not found" }); } - // If hostGroupId is provided, verify the group exists + // Verify group exists if provided if (hostGroupId) { const hostGroup = await prisma.host_groups.findUnique({ where: { id: hostGroupId }, @@ -852,22 +975,41 @@ router.put( } } - // Update host group - const updatedHost = await prisma.hosts.update({ - where: { id: hostId }, - data: { - host_group_id: hostGroupId || null, - updated_at: new Date(), - }, - include: { - host_groups: { - select: { - id: true, - name: true, - color: true, + // Use transaction to update group memberships + const updatedHost = await prisma.$transaction(async (tx) => { + // Remove existing memberships + await tx.host_group_memberships.deleteMany({ + where: { host_id: hostId }, + }); + + // Add new membership if group provided + if (hostGroupId) { + await tx.host_group_memberships.create({ + data: { + id: crypto.randomUUID(), + host_id: hostId, + host_group_id: hostGroupId, + }, + }); + } + + // Return updated host with groups + return await tx.hosts.findUnique({ + where: { id: hostId }, + include: { + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, + }, }, }, - }, + }); }); res.json({ @@ -903,13 +1045,16 @@ router.get( agent_version: true, auto_update: true, created_at: true, - host_group_id: true, notes: true, - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, }, @@ -1175,13 +1320,17 @@ router.get("/install", async (req, res) => { // Check for --force parameter const forceInstall = req.query.force === "true" || req.query.force === "1"; - // Inject the API credentials, server URL, curl flags, and force flag into the script + // Get architecture parameter (default to amd64) + const architecture = req.query.arch || "amd64"; + + // Inject the API credentials, server URL, curl flags, force flag, and architecture into the script const envVars = `#!/bin/bash export PATCHMON_URL="${serverUrl}" export API_ID="${host.api_id}" export API_KEY="${host.api_key}" export CURL_FLAGS="${curlFlags}" export FORCE_INSTALL="${forceInstall ? 
"true" : "false"}" +export ARCHITECTURE="${architecture}" `; @@ -1558,16 +1707,16 @@ router.patch( architecture: true, last_update: true, status: true, - host_group_id: true, - agent_version: true, - auto_update: true, - created_at: true, updated_at: true, - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, }, @@ -1631,17 +1780,16 @@ router.patch( architecture: true, last_update: true, status: true, - host_group_id: true, - agent_version: true, - auto_update: true, - created_at: true, - updated_at: true, notes: true, - host_groups: { - select: { - id: true, - name: true, - color: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + color: true, + }, + }, }, }, }, diff --git a/backend/src/routes/settingsRoutes.js b/backend/src/routes/settingsRoutes.js index 1bfed45..e1cfb72 100644 --- a/backend/src/routes/settingsRoutes.js +++ b/backend/src/routes/settingsRoutes.js @@ -8,8 +8,8 @@ const { getSettings, updateSettings } = require("../services/settingsService"); const router = express.Router(); const prisma = new PrismaClient(); -// WebSocket broadcaster for agent policy updates -const { broadcastSettingsUpdate } = require("../services/agentWs"); +// WebSocket broadcaster for agent policy updates (no longer used - queue-based delivery preferred) +// const { broadcastSettingsUpdate } = require("../services/agentWs"); const { queueManager, QUEUE_NAMES } = require("../services/automation"); // Helpers @@ -225,9 +225,8 @@ router.put( // Bulk add jobs await queue.addBulk(jobs); - // Also broadcast immediately to currently connected agents (best-effort) - // This ensures agents receive the change even if their host status isn't active yet - broadcastSettingsUpdate(updateData.update_interval); + // Note: Queue-based delivery handles retries and ensures reliable delivery + // No need for immediate broadcast as it would cause duplicate messages } res.json({ diff --git a/backend/src/routes/wsRoutes.js b/backend/src/routes/wsRoutes.js index fccec1d..ae42209 100644 --- a/backend/src/routes/wsRoutes.js +++ b/backend/src/routes/wsRoutes.js @@ -4,6 +4,10 @@ const { getConnectionInfo, subscribeToConnectionChanges, } = require("../services/agentWs"); +const { + validate_session, + update_session_activity, +} = require("../utils/session_manager"); const router = express.Router(); @@ -41,12 +45,25 @@ router.get("/status/:apiId/stream", async (req, res) => { return res.status(401).json({ error: "Authentication required" }); } - // Verify token manually + // Verify token manually with session validation const jwt = require("jsonwebtoken"); try { const decoded = jwt.verify(token, process.env.JWT_SECRET); - req.user = decoded; - } catch (_err) { + + // Validate session (same as regular auth middleware) + const validation = await validate_session(decoded.sessionId, token); + if (!validation.valid) { + console.error("[SSE] Session validation failed:", validation.reason); + console.error("[SSE] Invalid session for api_id:", apiId); + return res.status(401).json({ error: "Invalid or expired session" }); + } + + // Update session activity to prevent inactivity timeout + await update_session_activity(decoded.sessionId); + + req.user = validation.user; + } catch (err) { + console.error("[SSE] JWT verification failed:", err.message); console.error("[SSE] Invalid token for api_id:", apiId); return res.status(401).json({ error: 
"Invalid or expired token" }); } @@ -95,9 +112,23 @@ router.get("/status/:apiId/stream", async (req, res) => { unsubscribe(); }); - // Handle errors + // Handle errors - distinguish between different error types req.on("error", (err) => { - console.error("[SSE] Request error:", err); + // Only log non-connection-reset errors to reduce noise + if (err.code !== "ECONNRESET" && err.code !== "EPIPE") { + console.error("[SSE] Request error:", err); + } else { + console.log("[SSE] Client connection reset for api_id:", apiId); + } + clearInterval(heartbeat); + unsubscribe(); + }); + + // Handle response errors + res.on("error", (err) => { + if (err.code !== "ECONNRESET" && err.code !== "EPIPE") { + console.error("[SSE] Response error:", err); + } clearInterval(heartbeat); unsubscribe(); }); diff --git a/backend/src/services/automation/index.js b/backend/src/services/automation/index.js index 9a960d5..244ef5f 100644 --- a/backend/src/services/automation/index.js +++ b/backend/src/services/automation/index.js @@ -7,12 +7,14 @@ const agentWs = require("../agentWs"); const GitHubUpdateCheck = require("./githubUpdateCheck"); const SessionCleanup = require("./sessionCleanup"); const OrphanedRepoCleanup = require("./orphanedRepoCleanup"); +const OrphanedPackageCleanup = require("./orphanedPackageCleanup"); // Queue names const QUEUE_NAMES = { GITHUB_UPDATE_CHECK: "github-update-check", SESSION_CLEANUP: "session-cleanup", ORPHANED_REPO_CLEANUP: "orphaned-repo-cleanup", + ORPHANED_PACKAGE_CLEANUP: "orphaned-package-cleanup", AGENT_COMMANDS: "agent-commands", }; @@ -87,6 +89,8 @@ class QueueManager { this.automations[QUEUE_NAMES.SESSION_CLEANUP] = new SessionCleanup(this); this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP] = new OrphanedRepoCleanup(this); + this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP] = + new OrphanedPackageCleanup(this); console.log("โœ… All automation classes initialized"); } @@ -131,6 +135,18 @@ class QueueManager { }, ); + // Orphaned Package Cleanup Worker + this.workers[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP] = new Worker( + QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, + this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].process.bind( + this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP], + ), + { + connection: redisConnection, + concurrency: 1, + }, + ); + // Agent Commands Worker this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker( QUEUE_NAMES.AGENT_COMMANDS, @@ -317,6 +333,7 @@ class QueueManager { await this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK].schedule(); await this.automations[QUEUE_NAMES.SESSION_CLEANUP].schedule(); await this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].schedule(); + await this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].schedule(); } /** @@ -334,6 +351,12 @@ class QueueManager { return this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].triggerManual(); } + async triggerOrphanedPackageCleanup() { + return this.automations[ + QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP + ].triggerManual(); + } + /** * Get queue statistics */ diff --git a/backend/src/services/automation/orphanedPackageCleanup.js b/backend/src/services/automation/orphanedPackageCleanup.js new file mode 100644 index 0000000..fa2b726 --- /dev/null +++ b/backend/src/services/automation/orphanedPackageCleanup.js @@ -0,0 +1,116 @@ +const { prisma } = require("./shared/prisma"); + +/** + * Orphaned Package Cleanup Automation + * Removes packages with no associated hosts + */ +class OrphanedPackageCleanup { + constructor(queueManager) { + this.queueManager = queueManager; + 
this.queueName = "orphaned-package-cleanup"; + } + + /** + * Process orphaned package cleanup job + */ + async process(_job) { + const startTime = Date.now(); + console.log("๐Ÿงน Starting orphaned package cleanup..."); + + try { + // Find packages with 0 hosts + const orphanedPackages = await prisma.packages.findMany({ + where: { + host_packages: { + none: {}, + }, + }, + include: { + _count: { + select: { + host_packages: true, + }, + }, + }, + }); + + let deletedCount = 0; + const deletedPackages = []; + + // Delete orphaned packages + for (const pkg of orphanedPackages) { + try { + await prisma.packages.delete({ + where: { id: pkg.id }, + }); + deletedCount++; + deletedPackages.push({ + id: pkg.id, + name: pkg.name, + description: pkg.description, + category: pkg.category, + latest_version: pkg.latest_version, + }); + console.log( + `๐Ÿ—‘๏ธ Deleted orphaned package: ${pkg.name} (${pkg.latest_version})`, + ); + } catch (deleteError) { + console.error( + `โŒ Failed to delete package ${pkg.id}:`, + deleteError.message, + ); + } + } + + const executionTime = Date.now() - startTime; + console.log( + `โœ… Orphaned package cleanup completed in ${executionTime}ms - Deleted ${deletedCount} packages`, + ); + + return { + success: true, + deletedCount, + deletedPackages, + executionTime, + }; + } catch (error) { + const executionTime = Date.now() - startTime; + console.error( + `โŒ Orphaned package cleanup failed after ${executionTime}ms:`, + error.message, + ); + throw error; + } + } + + /** + * Schedule recurring orphaned package cleanup (daily at 3 AM) + */ + async schedule() { + const job = await this.queueManager.queues[this.queueName].add( + "orphaned-package-cleanup", + {}, + { + repeat: { cron: "0 3 * * *" }, // Daily at 3 AM + jobId: "orphaned-package-cleanup-recurring", + }, + ); + console.log("โœ… Orphaned package cleanup scheduled"); + return job; + } + + /** + * Trigger manual orphaned package cleanup + */ + async triggerManual() { + const job = await this.queueManager.queues[this.queueName].add( + "orphaned-package-cleanup-manual", + {}, + { priority: 1 }, + ); + console.log("โœ… Manual orphaned package cleanup triggered"); + return job; + } +} + +module.exports = OrphanedPackageCleanup; diff --git a/docker/README.md b/docker/README.md index bbe38e3..d0667c4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,9 +2,10 @@ ## Overview -PatchMon is a containerised application that monitors system patches and updates. The application consists of three main services: +PatchMon is a containerised application that monitors system patches and updates. The application consists of four main services: - **Database**: PostgreSQL 17 +- **Redis**: Redis 7 for BullMQ job queues and caching - **Backend**: Node.js API server - **Frontend**: React application served via NGINX @@ -38,21 +39,31 @@ These tags are available for both backend and frontend images as they are versio environment: DATABASE_URL: postgresql://patchmon_user:REPLACE_YOUR_POSTGRES_PASSWORD_HERE@database:5432/patchmon_db ``` -4. Generate a strong JWT secret. You can do this like so: +4. Set a Redis password in the Redis service where it says: + ```yaml + environment: + REDIS_PASSWORD: # CREATE A STRONG REDIS PASSWORD AND PUT IT HERE + ``` +5. Update the corresponding `REDIS_PASSWORD` in the backend service where it says: + ```yaml + environment: + REDIS_PASSWORD: REPLACE_YOUR_REDIS_PASSWORD_HERE + ``` +6. Generate a strong JWT secret. You can do this like so: ```bash openssl rand -hex 64 ``` -5. 
Set a JWT secret in the backend service where it says: +7. Set a JWT secret in the backend service where it says: ```yaml environment: JWT_SECRET: # CREATE A STRONG SECRET AND PUT IT HERE ``` -6. Configure environment variables (see [Configuration](#configuration) section) -7. Start the application: +8. Configure environment variables (see [Configuration](#configuration) section) +9. Start the application: ```bash docker compose up -d ``` -8. Access the application at `http://localhost:3000` +10. Access the application at `http://localhost:3000` ## Updating @@ -106,6 +117,12 @@ When you do this, updating to a new version requires manually updating the image | `POSTGRES_USER` | Database user | `patchmon_user` | | `POSTGRES_PASSWORD` | Database password | **MUST BE SET!** | +#### Redis Service + +| Variable | Description | Default | +| -------------- | ------------------ | ---------------- | +| `REDIS_PASSWORD` | Redis password | **MUST BE SET!** | + #### Backend Service ##### Database Configuration @@ -116,6 +133,15 @@ When you do this, updating to a new version requires manually updating the image | `PM_DB_CONN_MAX_ATTEMPTS` | Maximum database connection attempts | `30` | | `PM_DB_CONN_WAIT_INTERVAL` | Wait interval between connection attempts in seconds | `2` | +##### Redis Configuration + +| Variable | Description | Default | +| --------------- | ------------------------------ | ------- | +| `REDIS_HOST` | Redis server hostname | `redis` | +| `REDIS_PORT` | Redis server port | `6379` | +| `REDIS_PASSWORD` | Redis authentication password | **MUST BE UPDATED WITH YOUR REDIS_PASSWORD!** | +| `REDIS_DB` | Redis database number | `0` | + ##### Authentication & Security | Variable | Description | Default | @@ -165,9 +191,10 @@ When you do this, updating to a new version requires manually updating the image ### Volumes -The compose file creates two Docker volumes: +The compose file creates three Docker volumes: * `postgres_data`: PostgreSQL's data directory. +* `redis_data`: Redis's data directory. * `agent_files`: PatchMon's agent files. If you wish to bind either if their respective container paths to a host path rather than a Docker volume, you can do so in the Docker Compose file. @@ -201,6 +228,7 @@ For development with live reload and source code mounting: - Frontend: `http://localhost:3000` - Backend API: `http://localhost:3001` - Database: `localhost:5432` + - Redis: `localhost:6379` ## Development Docker Compose @@ -254,6 +282,7 @@ docker compose -f docker/docker-compose.dev.yml up -d --build ### Development Ports The development setup exposes additional ports for debugging: - **Database**: `5432` - Direct PostgreSQL access +- **Redis**: `6379` - Direct Redis access - **Backend**: `3001` - API server with development features - **Frontend**: `3000` - React development server with hot reload @@ -277,8 +306,8 @@ The development setup exposes additional ports for debugging: - **Prisma Schema Changes**: Backend service restarts automatically 4. **Database Access**: Connect database client directly to `localhost:5432` - -5. **Debug**: If started with `docker compose [...] up -d` or `docker compose [...] watch`, check logs manually: +5. **Redis Access**: Connect Redis client directly to `localhost:6379` +6. **Debug**: If started with `docker compose [...] up -d` or `docker compose [...] 
watch`, check logs manually: ```bash docker compose -f docker/docker-compose.dev.yml logs -f ``` @@ -288,6 +317,6 @@ The development setup exposes additional ports for debugging: - **Hot Reload**: Automatic code synchronization and service restarts - **Enhanced Logging**: Detailed logs for debugging -- **Direct Access**: Exposed ports for database and API debugging +- **Direct Access**: Exposed ports for database, Redis, and API debugging - **Health Checks**: Built-in health monitoring for services - **Volume Persistence**: Development data persists between restarts diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 55346b5..47e8f03 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -18,6 +18,22 @@ services: timeout: 5s retries: 7 + redis: + image: redis:7-alpine + restart: unless-stopped + command: redis-server --requirepass 1NS3CU6E_DEV_R3DIS_PASSW0RD + environment: + REDIS_PASSWORD: 1NS3CU6E_DEV_R3DIS_PASSW0RD + ports: + - "6379:6379" + volumes: + - ./compose_dev_data/redis:/data + healthcheck: + test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "1NS3CU6E_DEV_R3DIS_PASSW0RD", "ping"] + interval: 3s + timeout: 5s + retries: 7 + backend: build: context: .. @@ -34,6 +50,11 @@ services: SERVER_HOST: localhost SERVER_PORT: 3000 CORS_ORIGIN: http://localhost:3000 + # Redis Configuration + REDIS_HOST: redis + REDIS_PORT: 6379 + REDIS_PASSWORD: 1NS3CU6E_DEV_R3DIS_PASSW0RD + REDIS_DB: 0 ports: - "3001:3001" volumes: @@ -41,6 +62,8 @@ services: depends_on: database: condition: service_healthy + redis: + condition: service_healthy develop: watch: - action: sync diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 54ec143..2e55aee 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -16,6 +16,21 @@ services: timeout: 5s retries: 7 + redis: + image: redis:7-alpine + restart: unless-stopped + command: redis-server /usr/local/etc/redis/redis.conf + environment: + REDIS_PASSWORD: # CREATE A STRONG REDIS PASSWORD AND PUT IT HERE + volumes: + - redis_data:/data + - ./docker/redis.conf:/usr/local/etc/redis/redis.conf:ro + healthcheck: + test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "${REDIS_PASSWORD}", "ping"] + interval: 3s + timeout: 5s + retries: 7 + backend: image: ghcr.io/patchmon/patchmon-backend:latest restart: unless-stopped @@ -28,11 +43,18 @@ services: SERVER_HOST: localhost SERVER_PORT: 3000 CORS_ORIGIN: http://localhost:3000 + # Redis Configuration + REDIS_HOST: redis + REDIS_PORT: 6379 + REDIS_PASSWORD: REPLACE_YOUR_REDIS_PASSWORD_HERE + REDIS_DB: 0 volumes: - agent_files:/app/agents depends_on: database: condition: service_healthy + redis: + condition: service_healthy frontend: image: ghcr.io/patchmon/patchmon-frontend:latest @@ -45,4 +67,5 @@ services: volumes: postgres_data: + redis_data: agent_files: diff --git a/docker/nginx.conf.template b/docker/nginx.conf.template index f5b906e..dc96ca2 100644 --- a/docker/nginx.conf.template +++ b/docker/nginx.conf.template @@ -52,6 +52,64 @@ server { } } + # SSE (Server-Sent Events) specific configuration + location /api/v1/ws/status/ { + proxy_pass http://${BACKEND_HOST}:${BACKEND_PORT}; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + + # Critical SSE settings + proxy_buffering off; + proxy_cache off; + proxy_set_header Connection ''; + proxy_http_version 1.1; 
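+        # SSE responses are one long-lived stream: with buffering and caching off,
+        # an empty Connection header and HTTP/1.1 keep-alive, nginx forwards each
+        # "data:" frame to the client as soon as the backend writes it.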
+ chunked_transfer_encoding off; + + # Timeout settings for long-lived connections + proxy_read_timeout 24h; + proxy_send_timeout 24h; + proxy_connect_timeout 60s; + + # Disable nginx buffering for real-time streaming + proxy_request_buffering off; + proxy_max_temp_file_size 0; + + # CORS headers for SSE + add_header Access-Control-Allow-Origin * always; + add_header Access-Control-Allow-Methods "GET, OPTIONS" always; + add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization" always; + + # Handle preflight requests + if ($request_method = 'OPTIONS') { + return 204; + } + } + + # WebSocket upgrade handling + location /api/v1/agents/ws { + proxy_pass http://${BACKEND_HOST}:${BACKEND_PORT}; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + + # WebSocket timeout settings + proxy_read_timeout 24h; + proxy_send_timeout 24h; + proxy_connect_timeout 60s; + + # Disable buffering for WebSocket + proxy_buffering off; + proxy_cache off; + } + # Static assets caching location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { expires 1y; diff --git a/docker/redis.conf b/docker/redis.conf new file mode 100644 index 0000000..1759bfa --- /dev/null +++ b/docker/redis.conf @@ -0,0 +1,35 @@ +# Redis Configuration for PatchMon Production +# Security settings +requirepass ${REDIS_PASSWORD} +rename-command FLUSHDB "" +rename-command FLUSHALL "" +rename-command DEBUG "" +rename-command CONFIG "CONFIG_${REDIS_PASSWORD}" + +# Memory management +maxmemory 256mb +maxmemory-policy allkeys-lru + +# Persistence settings +save 900 1 +save 300 10 +save 60 10000 + +# Logging +loglevel notice +logfile "" + +# Network security +bind 127.0.0.1 +protected-mode yes + +# Performance tuning +tcp-keepalive 300 +timeout 0 + +# Disable dangerous commands +rename-command SHUTDOWN "SHUTDOWN_${REDIS_PASSWORD}" +rename-command KEYS "" +rename-command MONITOR "" +rename-command SLAVEOF "" +rename-command REPLICAOF "" diff --git a/frontend/src/components/InlineMultiGroupEdit.jsx b/frontend/src/components/InlineMultiGroupEdit.jsx new file mode 100644 index 0000000..f1ba3d2 --- /dev/null +++ b/frontend/src/components/InlineMultiGroupEdit.jsx @@ -0,0 +1,283 @@ +import { Check, ChevronDown, Edit2, X } from "lucide-react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; + +const InlineMultiGroupEdit = ({ + value = [], // Array of group IDs + onSave, + onCancel, + options = [], + className = "", + disabled = false, +}) => { + const [isEditing, setIsEditing] = useState(false); + const [selectedValues, setSelectedValues] = useState(value); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(""); + const [isOpen, setIsOpen] = useState(false); + const [dropdownPosition, setDropdownPosition] = useState({ + top: 0, + left: 0, + width: 0, + }); + const dropdownRef = useRef(null); + const buttonRef = useRef(null); + + useEffect(() => { + if (isEditing && dropdownRef.current) { + dropdownRef.current.focus(); + } + }, [isEditing]); + + useEffect(() => { + setSelectedValues(value); + // Force re-render when value changes + if (!isEditing) { + setIsOpen(false); + } + }, [value, isEditing]); + + // Calculate dropdown 
position + const calculateDropdownPosition = useCallback(() => { + if (buttonRef.current) { + const rect = buttonRef.current.getBoundingClientRect(); + setDropdownPosition({ + top: rect.bottom + window.scrollY + 4, + left: rect.left + window.scrollX, + width: rect.width, + }); + } + }, []); + + // Close dropdown when clicking outside + useEffect(() => { + const handleClickOutside = (event) => { + if (dropdownRef.current && !dropdownRef.current.contains(event.target)) { + setIsOpen(false); + } + }; + + if (isOpen) { + calculateDropdownPosition(); + document.addEventListener("mousedown", handleClickOutside); + window.addEventListener("resize", calculateDropdownPosition); + window.addEventListener("scroll", calculateDropdownPosition); + return () => { + document.removeEventListener("mousedown", handleClickOutside); + window.removeEventListener("resize", calculateDropdownPosition); + window.removeEventListener("scroll", calculateDropdownPosition); + }; + } + }, [isOpen, calculateDropdownPosition]); + + const handleEdit = () => { + if (disabled) return; + setIsEditing(true); + setSelectedValues(value); + setError(""); + // Automatically open dropdown when editing starts + setTimeout(() => { + setIsOpen(true); + }, 0); + }; + + const handleCancel = () => { + setIsEditing(false); + setSelectedValues(value); + setError(""); + setIsOpen(false); + if (onCancel) onCancel(); + }; + + const handleSave = async () => { + if (disabled || isLoading) return; + + // Check if values actually changed + const sortedCurrent = [...value].sort(); + const sortedSelected = [...selectedValues].sort(); + if (JSON.stringify(sortedCurrent) === JSON.stringify(sortedSelected)) { + setIsEditing(false); + setIsOpen(false); + return; + } + + setIsLoading(true); + setError(""); + + try { + await onSave(selectedValues); + setIsEditing(false); + setIsOpen(false); + } catch (err) { + setError(err.message || "Failed to save"); + } finally { + setIsLoading(false); + } + }; + + const handleKeyDown = (e) => { + if (e.key === "Enter") { + e.preventDefault(); + handleSave(); + } else if (e.key === "Escape") { + e.preventDefault(); + handleCancel(); + } + }; + + const toggleGroup = (groupId) => { + setSelectedValues((prev) => { + if (prev.includes(groupId)) { + return prev.filter((id) => id !== groupId); + } else { + return [...prev, groupId]; + } + }); + }; + + const _displayValue = useMemo(() => { + if (!value || value.length === 0) { + return "Ungrouped"; + } + if (value.length === 1) { + const option = options.find((opt) => opt.id === value[0]); + return option ? option.name : "Unknown Group"; + } + return `${value.length} groups`; + }, [value, options]); + + const displayGroups = useMemo(() => { + if (!value || value.length === 0) { + return []; + } + return value + .map((groupId) => options.find((opt) => opt.id === groupId)) + .filter(Boolean); + }, [value, options]); + + if (isEditing) { + return ( +
+
+
+ + + {isOpen && ( +
+
+ {options.map((option) => ( + + ))} + {options.length === 0 && ( +
+ No groups available +
+ )} +
+
+ )} +
+ + +
+ {error && ( + + {error} + + )} +
+ ); + } + + return ( +
+ {displayGroups.length === 0 ? ( + + Ungrouped + + ) : ( +
+ {displayGroups.map((group) => ( + + {group.name} + + ))} +
+ )} + {!disabled && ( + + )} +
+ ); +}; + +export default InlineMultiGroupEdit; diff --git a/frontend/src/components/settings/AgentManagementTab.jsx b/frontend/src/components/settings/AgentManagementTab.jsx index 60274fb..203317c 100644 --- a/frontend/src/components/settings/AgentManagementTab.jsx +++ b/frontend/src/components/settings/AgentManagementTab.jsx @@ -26,7 +26,7 @@ const AgentManagementTab = () => { }); // Helper function to get curl flags based on settings - const getCurlFlags = () => { + const _getCurlFlags = () => { return settings?.ignore_ssl_self_signed ? "-sk" : "-s"; }; @@ -177,29 +177,40 @@ const AgentManagementTab = () => { Agent Uninstall Command
-

+

To completely remove PatchMon from a host:

-
-
- curl {getCurlFlags()} {window.location.origin} - /api/v1/hosts/remove | sudo bash + + {/* Go Agent Uninstall */} +
+
+
+
+ sudo patchmon-agent uninstall +
+ +
+
+ Options: --remove-config,{" "} + --remove-logs, --remove-all,{" "} + --force +
-
+

- ⚠️ This will remove all PatchMon files, configuration, and - crontab entries + ⚠️ This command will remove all PatchMon files, + configuration, and crontab entries

diff --git a/frontend/src/components/settings/AgentUpdatesTab.jsx b/frontend/src/components/settings/AgentUpdatesTab.jsx index f60dfdc..1274c2e 100644 --- a/frontend/src/components/settings/AgentUpdatesTab.jsx +++ b/frontend/src/components/settings/AgentUpdatesTab.jsx @@ -446,6 +446,53 @@ const AgentUpdatesTab = () => {
)} + + {/* Uninstall Instructions */} +
+
+ +
+

+ Agent Uninstall Command +

+
+

To completely remove PatchMon from a host:

+ + {/* Go Agent Uninstall */} +
+
+
+
+ sudo patchmon-agent uninstall +
+ +
+
+ Options: --remove-config,{" "} + --remove-logs, --remove-all,{" "} + --force +
+
+
+ +

+ ⚠️ This command will remove all PatchMon files, configuration, + and crontab entries +

+
+
+
+
); }; diff --git a/frontend/src/pages/Automation.jsx b/frontend/src/pages/Automation.jsx index 8eef48a..f69fb71 100644 --- a/frontend/src/pages/Automation.jsx +++ b/frontend/src/pages/Automation.jsx @@ -155,6 +155,20 @@ const Automation = () => { year: "numeric", }); } + if (schedule === "Daily at 3 AM") { + const now = new Date(); + const tomorrow = new Date(now); + tomorrow.setDate(tomorrow.getDate() + 1); + tomorrow.setHours(3, 0, 0, 0); + return tomorrow.toLocaleString([], { + hour12: true, + hour: "numeric", + minute: "2-digit", + day: "numeric", + month: "numeric", + year: "numeric", + }); + } if (schedule === "Every hour") { const now = new Date(); const nextHour = new Date(now); @@ -188,6 +202,13 @@ const Automation = () => { tomorrow.setHours(2, 0, 0, 0); return tomorrow.getTime(); } + if (schedule === "Daily at 3 AM") { + const now = new Date(); + const tomorrow = new Date(now); + tomorrow.setDate(tomorrow.getDate() + 1); + tomorrow.setHours(3, 0, 0, 0); + return tomorrow.getTime(); + } if (schedule === "Every hour") { const now = new Date(); const nextHour = new Date(now); @@ -220,6 +241,8 @@ const Automation = () => { endpoint = "/automation/trigger/session-cleanup"; } else if (jobType === "orphaned-repos") { endpoint = "/automation/trigger/orphaned-repo-cleanup"; + } else if (jobType === "orphaned-packages") { + endpoint = "/automation/trigger/orphaned-package-cleanup"; } else if (jobType === "agent-collection") { endpoint = "/automation/trigger/agent-collection"; } @@ -531,6 +554,10 @@ const Automation = () => { automation.queue.includes("orphaned-repo") ) { triggerManualJob("orphaned-repos"); + } else if ( + automation.queue.includes("orphaned-package") + ) { + triggerManualJob("orphaned-packages"); } else if ( automation.queue.includes("agent-commands") ) { diff --git a/frontend/src/pages/Dashboard.jsx b/frontend/src/pages/Dashboard.jsx index 1266bb9..c5bdfae 100644 --- a/frontend/src/pages/Dashboard.jsx +++ b/frontend/src/pages/Dashboard.jsx @@ -200,6 +200,8 @@ const Dashboard = () => { data: packageTrendsData, isLoading: packageTrendsLoading, error: _packageTrendsError, + refetch: refetchPackageTrends, + isFetching: packageTrendsFetching, } = useQuery({ queryKey: ["packageTrends", packageTrendsPeriod, packageTrendsHost], queryFn: () => { @@ -771,6 +773,20 @@ const Dashboard = () => { Package Trends Over Time
+ {/* Refresh Button */} + + {/* Period Selector */} setArchitecture(e.target.value)} + className="px-3 py-2 border border-primary-300 dark:border-primary-600 rounded-md bg-white dark:bg-secondary-800 text-sm text-secondary-900 dark:text-white focus:ring-primary-500 focus:border-primary-500" + > + + + + +

+ Select the architecture of the target host +

+
+
{
- 2. Download and Install Agent Script + 2. Download and Install Agent Binary
@@ -1484,7 +1542,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { type="button" onClick={() => copyToClipboard( - `curl ${getCurlFlags()} -o /usr/local/bin/patchmon-agent.sh ${serverUrl}/api/v1/hosts/agent/download -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" && sudo chmod +x /usr/local/bin/patchmon-agent.sh`, + `curl ${getCurlFlags()} -o /usr/local/bin/patchmon-agent ${serverUrl}/api/v1/hosts/agent/download?arch=${architecture} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" && sudo chmod +x /usr/local/bin/patchmon-agent`, ) } className="btn-secondary flex items-center gap-1" @@ -1502,7 +1560,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
@@ -1510,7 +1568,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { type="button" onClick={() => copyToClipboard( - `sudo /usr/local/bin/patchmon-agent.sh configure "${host.api_id}" "${host.api_key}" "${serverUrl}"`, + `sudo /usr/local/bin/patchmon-agent config set-api "${host.api_id}" "${host.api_key}" "${serverUrl}"`, ) } className="btn-secondary flex items-center gap-1" @@ -1528,7 +1586,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
@@ -1536,7 +1594,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { type="button" onClick={() => copyToClipboard( - "sudo /usr/local/bin/patchmon-agent.sh test", + "sudo /usr/local/bin/patchmon-agent ping", ) } className="btn-secondary flex items-center gap-1" @@ -1554,7 +1612,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
@@ -1562,7 +1620,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { type="button" onClick={() => copyToClipboard( - "sudo /usr/local/bin/patchmon-agent.sh update", + "sudo /usr/local/bin/patchmon-agent report", ) } className="btn-secondary flex items-center gap-1" @@ -1575,12 +1633,33 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
- 6. Setup Crontab (Optional) + 6. Create Systemd Service File
/dev/null | grep -v "patchmon-agent.sh update"; echo "${new Date().getMinutes()} * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1") | sudo crontab -`} + value={`sudo tee /etc/systemd/system/patchmon-agent.service > /dev/null << 'EOF' +[Unit] +Description=PatchMon Agent Service +After=network.target +Wants=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/bin/patchmon-agent serve +Restart=always +RestartSec=10 +WorkingDirectory=/etc/patchmon + +# Logging +StandardOutput=journal +StandardError=journal +SyslogIdentifier=patchmon-agent + +[Install] +WantedBy=multi-user.target +EOF`} readOnly className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white" /> @@ -1588,7 +1667,28 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { type="button" onClick={() => copyToClipboard( - `(sudo crontab -l 2>/dev/null | grep -v "patchmon-agent.sh update"; echo "${new Date().getMinutes()} * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1") | sudo crontab -`, + `sudo tee /etc/systemd/system/patchmon-agent.service > /dev/null << 'EOF' +[Unit] +Description=PatchMon Agent Service +After=network.target +Wants=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/bin/patchmon-agent serve +Restart=always +RestartSec=10 +WorkingDirectory=/etc/patchmon + +# Logging +StandardOutput=journal +StandardError=journal +SyslogIdentifier=patchmon-agent + +[Install] +WantedBy=multi-user.target +EOF`, ) } className="btn-secondary flex items-center gap-1" @@ -1598,6 +1698,64 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
+ +
+
+ 7. Enable and Start Service +
+
+ + +
+

+ This will start the agent service and establish WebSocket + connection for real-time communication +

+
+ +
+
+ 8. Verify Service Status +
+
+ + +
+

+ Check that the service is running and WebSocket connection + is established +

+
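A typical way to enable and verify the `patchmon-agent.service` unit from step 6 (illustrative only; the exact commands behind the copy buttons may differ):

```bash
# Pick up the new unit file, then start the agent now and on every boot
sudo systemctl daemon-reload
sudo systemctl enable --now patchmon-agent

# Confirm the service is active and watch the journal for the WebSocket
# connection being established
sudo systemctl status patchmon-agent
sudo journalctl -u patchmon-agent -f
```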
diff --git a/frontend/src/pages/Hosts.jsx b/frontend/src/pages/Hosts.jsx index 3621176..191ecc3 100644 --- a/frontend/src/pages/Hosts.jsx +++ b/frontend/src/pages/Hosts.jsx @@ -27,7 +27,7 @@ import { import { useEffect, useId, useMemo, useState } from "react"; import { Link, useNavigate, useSearchParams } from "react-router-dom"; import InlineEdit from "../components/InlineEdit"; -import InlineGroupEdit from "../components/InlineGroupEdit"; +import InlineMultiGroupEdit from "../components/InlineMultiGroupEdit"; import InlineToggle from "../components/InlineToggle"; import { adminHostsAPI, @@ -35,14 +35,14 @@ import { formatRelativeTime, hostGroupsAPI, } from "../utils/api"; -import { OSIcon } from "../utils/osIcons.jsx"; +import { getOSDisplayName, OSIcon } from "../utils/osIcons.jsx"; // Add Host Modal Component const AddHostModal = ({ isOpen, onClose, onSuccess }) => { const friendlyNameId = useId(); const [formData, setFormData] = useState({ friendly_name: "", - hostGroupId: "", + hostGroupIds: [], // Changed to array for multiple selection }); const [isSubmitting, setIsSubmitting] = useState(false); const [error, setError] = useState(""); @@ -65,7 +65,7 @@ const AddHostModal = ({ isOpen, onClose, onSuccess }) => { const response = await adminHostsAPI.create(formData); console.log("Host created successfully:", formData.friendly_name); onSuccess(response.data); - setFormData({ friendly_name: "", hostGroupId: "" }); + setFormData({ friendly_name: "", hostGroupIds: [] }); onClose(); } catch (err) { console.error("Full error object:", err); @@ -135,68 +135,56 @@ const AddHostModal = ({ isOpen, onClose, onSuccess }) => {
- Host Group + Host Groups -
- {/* No Group Option */} - - +
{/* Host Group Options */} {hostGroups?.map((group) => ( - + ))}

- Optional: Assign this host to a group for better organization. + Optional: Select one or more groups to assign this host to for + better organization.

@@ -341,7 +329,7 @@ const Hosts = () => { visible: true, order: 8, }, - { id: "ws_status", label: "Online", visible: true, order: 9 }, + { id: "ws_status", label: "Connection", visible: true, order: 9 }, { id: "status", label: "Status", visible: true, order: 10 }, { id: "updates", label: "Updates", visible: true, order: 11 }, { id: "notes", label: "Notes", visible: false, order: 12 }, @@ -368,8 +356,11 @@ const Hosts = () => { localStorage.removeItem("hosts-column-config"); return defaultConfig; } else { - // Use the existing configuration - return savedConfig; + // Ensure ws_status column is visible in saved config + const updatedConfig = savedConfig.map((col) => + col.id === "ws_status" ? { ...col, visible: true } : col, + ); + return updatedConfig; } } catch { // If there's an error parsing the config, clear it and use default @@ -479,11 +470,12 @@ const Hosts = () => { } }; - es.onerror = () => { + es.onerror = (_error) => { + console.log(`[SSE] Connection error for ${apiId}, retrying...`); es?.close(); eventSources.delete(apiId); if (isMounted) { - // Retry connection after 5 seconds + // Retry connection after 5 seconds with exponential backoff setTimeout(() => connectHost(apiId), 5000); } }; @@ -553,7 +545,7 @@ const Hosts = () => { }, }); - const updateHostGroupMutation = useMutation({ + const _updateHostGroupMutation = useMutation({ mutationFn: ({ hostId, hostGroupId }) => { console.log("updateHostGroupMutation called with:", { hostId, @@ -599,6 +591,46 @@ const Hosts = () => { }, }); + const updateHostGroupsMutation = useMutation({ + mutationFn: ({ hostId, groupIds }) => { + console.log("updateHostGroupsMutation called with:", { + hostId, + groupIds, + }); + return adminHostsAPI.updateGroups(hostId, groupIds).then((res) => { + console.log("updateGroups API response:", res); + return res.data; + }); + }, + onSuccess: (data) => { + // Update the cache with the new host data + queryClient.setQueryData(["hosts"], (oldData) => { + console.log("Old cache data before update:", oldData); + if (!oldData) return oldData; + const updatedData = oldData.map((host) => { + if (host.id === data.host.id) { + console.log( + "Updating host in cache:", + host.id, + "with new data:", + data.host, + ); + return data.host; + } + return host; + }); + console.log("New cache data after update:", updatedData); + return updatedData; + }); + + // Also invalidate to ensure consistency + queryClient.invalidateQueries(["hosts"]); + }, + onError: (error) => { + console.error("updateHostGroupsMutation error:", error); + }, + }); + const toggleAutoUpdateMutation = useMutation({ mutationFn: ({ hostId, autoUpdate }) => adminHostsAPI @@ -879,7 +911,7 @@ const Hosts = () => { visible: true, order: 8, }, - { id: "ws_status", label: "Online", visible: true, order: 9 }, + { id: "ws_status", label: "Connection", visible: true, order: 9 }, { id: "status", label: "Status", visible: true, order: 10 }, { id: "updates", label: "Updates", visible: true, order: 11 }, { id: "notes", label: "Notes", visible: false, order: 12 }, @@ -947,27 +979,33 @@ const Hosts = () => { {host.ip || "N/A"}
); - case "group": + case "group": { + // Extract group IDs from the new many-to-many structure + const groupIds = + host.host_group_memberships?.map( + (membership) => membership.host_groups.id, + ) || []; return ( - - updateHostGroupMutation.mutate({ + + updateHostGroupsMutation.mutate({ hostId: host.id, - hostGroupId: newGroupId, + groupIds: newGroupIds, }) } options={hostGroups || []} - placeholder="Select group..." + placeholder="Select groups..." className="w-full" /> ); + } case "os": return (
- {host.os_type} + {getOSDisplayName(host.os_type)}
); case "os_version": @@ -1000,24 +1038,30 @@ const Hosts = () => { const wsStatus = wsStatusMap[host.api_id]; if (!wsStatus) { return ( - - ... + +
+ Unknown
); } return ( +
{wsStatus.connected ? (wsStatus.secure ? "WSS" : "WS") : "Offline"}
); @@ -1616,6 +1660,11 @@ const Hosts = () => {
{column.label}
+ ) : column.id === "ws_status" ? ( +
+ + {column.label} +
) : column.id === "status" ? (
-
- -
-

+

Drag to reorder columns or toggle visibility

+
-
+ {/* Scrollable content */} +
+
{columnConfig.map((column, index) => ( ))}
+
-
+ {/* Footer */} +
+
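For reference, a minimal sketch of how the new many-to-many groups endpoint is driven from the frontend, assuming the shared axios `api` client from `frontend/src/utils/api.js`; `setHostGroups` is a hypothetical helper, not part of this patch:

```js
// Replace a host's group memberships with exactly `groupIds`;
// an empty array leaves the host "Ungrouped".
async function setHostGroups(api, hostId, groupIds) {
	// PUT /hosts/:hostId/groups verifies each ID exists, then rewrites
	// host_group_memberships inside a single transaction.
	const { data } = await api.put(`/hosts/${hostId}/groups`, { groupIds });
	// Flatten the membership rows back to plain { id, name, color } groups,
	// the same shape the Hosts table renders.
	return data.host.host_group_memberships.map((m) => m.host_groups);
}
```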
diff --git a/frontend/src/pages/Settings.jsx b/frontend/src/pages/Settings.jsx index c569c36..60539f8 100644 --- a/frontend/src/pages/Settings.jsx +++ b/frontend/src/pages/Settings.jsx @@ -120,7 +120,7 @@ const Settings = () => { }); // Helper function to get curl flags based on settings - const getCurlFlags = () => { + const _getCurlFlags = () => { return settings?.ignore_ssl_self_signed ? "-sk" : "-s"; }; @@ -1155,28 +1155,39 @@ const Settings = () => { Agent Uninstall Command
-

+

To completely remove PatchMon from a host:

-
-
- curl {getCurlFlags()} {window.location.origin} - /api/v1/hosts/remove | sudo bash + + {/* Go Agent Uninstall */} +
+
+
+
+ sudo patchmon-agent uninstall +
+ +
+
+ Options: --remove-config,{" "} + --remove-logs,{" "} + --remove-all, --force +
-
+

- ⚠️ This will remove all PatchMon files, + ⚠️ This command will remove all PatchMon files, configuration, and crontab entries

diff --git a/frontend/src/utils/api.js b/frontend/src/utils/api.js index d212b4f..f105a37 100644 --- a/frontend/src/utils/api.js +++ b/frontend/src/utils/api.js @@ -68,6 +68,11 @@ export const dashboardAPI = { const url = `/dashboard/package-trends${queryString ? `?${queryString}` : ""}`; return api.get(url); }, + getPackageSpikeAnalysis: (params = {}) => { + const queryString = new URLSearchParams(params).toString(); + const url = `/dashboard/package-spike-analysis${queryString ? `?${queryString}` : ""}`; + return api.get(url); + }, getRecentUsers: () => api.get("/dashboard/recent-users"), getRecentCollection: () => api.get("/dashboard/recent-collection"), }; @@ -82,8 +87,12 @@ export const adminHostsAPI = { api.post(`/hosts/${hostId}/regenerate-credentials`), updateGroup: (hostId, hostGroupId) => api.put(`/hosts/${hostId}/group`, { hostGroupId }), + updateGroups: (hostId, groupIds) => + api.put(`/hosts/${hostId}/groups`, { groupIds }), bulkUpdateGroup: (hostIds, hostGroupId) => api.put("/hosts/bulk/group", { hostIds, hostGroupId }), + bulkUpdateGroups: (hostIds, groupIds) => + api.put("/hosts/bulk/groups", { hostIds, groupIds }), toggleAutoUpdate: (hostId, autoUpdate) => api.patch(`/hosts/${hostId}/auto-update`, { auto_update: autoUpdate }), updateFriendlyName: (hostId, friendlyName) => diff --git a/frontend/src/utils/osIcons.jsx b/frontend/src/utils/osIcons.jsx index a6c33e3..36668ed 100644 --- a/frontend/src/utils/osIcons.jsx +++ b/frontend/src/utils/osIcons.jsx @@ -1,43 +1,104 @@ import { Monitor, Server } from "lucide-react"; import { DiWindows } from "react-icons/di"; -// Import OS icons from react-icons +// Import OS icons from react-icons Simple Icons - using only confirmed available icons import { + SiAlmalinux, SiAlpinelinux, SiArchlinux, SiCentos, SiDebian, + SiDeepin, + SiElementary, SiFedora, + SiGentoo, + SiKalilinux, SiLinux, + SiLinuxmint, SiMacos, + SiManjaro, + SiOpensuse, + SiOracle, + SiParrotsecurity, + SiPopos, + SiRedhat, + SiRockylinux, + SiSlackware, + SiSolus, + SiSuse, + SiTails, SiUbuntu, + SiZorin, } from "react-icons/si"; /** * OS Icon mapping utility * Maps operating system types to appropriate react-icons components + * Now uses specific icons based on actual OS names from /etc/os-release */ export const getOSIcon = (osType) => { if (!osType) return Monitor; const os = osType.toLowerCase(); - // Linux distributions with authentic react-icons - if (os.includes("ubuntu")) return SiUbuntu; + // Ubuntu and Ubuntu variants + if (os.includes("ubuntu")) { + // For Ubuntu variants, use generic Ubuntu icon as fallback + return SiUbuntu; + } + + // Pop!_OS + if (os.includes("pop") || os.includes("pop!_os")) return SiPopos; + + // Linux Mint + if (os.includes("mint") || os.includes("linuxmint")) return SiLinuxmint; + + // Elementary OS + if (os.includes("elementary")) return SiElementary; + + // Debian if (os.includes("debian")) return SiDebian; - if ( - os.includes("centos") || - os.includes("rhel") || - os.includes("red hat") || - os.includes("almalinux") || - os.includes("rocky") - ) - return SiCentos; - if (os === "ol" || os.includes("oraclelinux") || os.includes("oracle linux")) - return SiLinux; // Use generic Linux icon for Oracle Linux + + // Rocky Linux + if (os.includes("rocky")) return SiRockylinux; + + // AlmaLinux + if (os.includes("alma") || os.includes("almalinux")) return SiAlmalinux; + + // CentOS + if (os.includes("centos")) return SiCentos; + + // Red Hat Enterprise Linux + if (os.includes("rhel") || os.includes("red hat")) return SiRedhat; + + // 
Fedora if (os.includes("fedora")) return SiFedora; + + // Oracle Linux + if (os === "ol" || os.includes("oraclelinux") || os.includes("oracle linux")) + return SiOracle; + + // SUSE distributions + if (os.includes("opensuse")) return SiOpensuse; + if (os.includes("suse")) return SiSuse; + + // Arch-based distributions if (os.includes("arch")) return SiArchlinux; + if (os.includes("manjaro")) return SiManjaro; + if (os.includes("endeavour") || os.includes("endeavouros")) + return SiArchlinux; // Fallback to Arch + if (os.includes("garuda")) return SiArchlinux; // Fallback to Arch + if (os.includes("blackarch")) return SiArchlinux; // Fallback to Arch + + // Other distributions if (os.includes("alpine")) return SiAlpinelinux; - if (os.includes("suse") || os.includes("opensuse")) return SiLinux; // SUSE uses generic Linux icon + if (os.includes("gentoo")) return SiGentoo; + if (os.includes("slackware")) return SiSlackware; + if (os.includes("zorin")) return SiZorin; + if (os.includes("deepin")) return SiDeepin; + if (os.includes("solus")) return SiSolus; + if (os.includes("tails")) return SiTails; + if (os.includes("parrot")) return SiParrotsecurity; + if (os.includes("kali")) return SiKalilinux; // Generic Linux if (os.includes("linux")) return SiLinux; @@ -70,27 +131,83 @@ export const getOSColor = (osType) => { /** * OS Display name utility * Provides clean, formatted OS names for display + * Updated to handle more distributions from /etc/os-release */ export const getOSDisplayName = (osType) => { if (!osType) return "Unknown"; const os = osType.toLowerCase(); - // Linux distributions - if (os.includes("ubuntu")) return "Ubuntu"; + // Ubuntu and variants + if (os.includes("ubuntu")) { + if (os.includes("kubuntu")) return "Kubuntu"; + if (os.includes("lubuntu")) return "Lubuntu"; + if (os.includes("xubuntu")) return "Xubuntu"; + if (os.includes("ubuntu mate") || os.includes("ubuntumate")) + return "Ubuntu MATE"; + if (os.includes("ubuntu budgie") || os.includes("ubuntubudgie")) + return "Ubuntu Budgie"; + if (os.includes("ubuntu studio") || os.includes("ubuntustudio")) + return "Ubuntu Studio"; + if (os.includes("ubuntu kylin") || os.includes("ubuntukylin")) + return "Ubuntu Kylin"; + return "Ubuntu"; + } + + // Pop!_OS + if (os.includes("pop") || os.includes("pop!_os")) return "Pop!_OS"; + + // Linux Mint + if (os.includes("mint") || os.includes("linuxmint")) return "Linux Mint"; + + // Elementary OS + if (os.includes("elementary")) return "Elementary OS"; + + // Debian if (os.includes("debian")) return "Debian"; - if (os.includes("centos")) return "CentOS"; - if (os.includes("almalinux")) return "AlmaLinux"; + + // Rocky Linux if (os.includes("rocky")) return "Rocky Linux"; - if (os === "ol" || os.includes("oraclelinux") || os.includes("oracle linux")) - return "Oracle Linux"; + + // AlmaLinux + if (os.includes("alma") || os.includes("almalinux")) return "AlmaLinux"; + + // CentOS + if (os.includes("centos")) return "CentOS"; + + // Red Hat Enterprise Linux if (os.includes("rhel") || os.includes("red hat")) return "Red Hat Enterprise Linux"; + + // Fedora if (os.includes("fedora")) return "Fedora"; - if (os.includes("arch")) return "Arch Linux"; - if (os.includes("suse")) return "SUSE Linux"; + + // Oracle Linux + if (os === "ol" || os.includes("oraclelinux") || os.includes("oracle linux")) + return "Oracle Linux"; + + // SUSE distributions if (os.includes("opensuse")) return "openSUSE"; + if (os.includes("suse")) return "SUSE Linux"; + + // Arch-based distributions + if 
(os.includes("arch")) return "Arch Linux"; + if (os.includes("manjaro")) return "Manjaro"; + if (os.includes("endeavour") || os.includes("endeavouros")) + return "EndeavourOS"; + if (os.includes("garuda")) return "Garuda Linux"; + if (os.includes("blackarch")) return "BlackArch Linux"; + + // Other distributions if (os.includes("alpine")) return "Alpine Linux"; + if (os.includes("gentoo")) return "Gentoo"; + if (os.includes("slackware")) return "Slackware"; + if (os.includes("zorin")) return "Zorin OS"; + if (os.includes("deepin")) return "Deepin"; + if (os.includes("solus")) return "Solus"; + if (os.includes("tails")) return "Tails"; + if (os.includes("parrot")) return "Parrot Security"; + if (os.includes("kali")) return "Kali Linux"; // Generic Linux if (os.includes("linux")) return "Linux";
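+  // Illustrative only - how typical /etc/os-release values resolve through the
+  // case-insensitive substring checks above:
+  //   getOSDisplayName("Pop!_OS 22.04 LTS") -> "Pop!_OS"
+  //   getOSDisplayName("rocky")             -> "Rocky Linux"
+  //   getOSDisplayName("ol")                -> "Oracle Linux"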