Merge pull request #288 from PatchMon/1-3-3

Bug fixes
9 Technology Group LTD
2025-11-07 22:14:37 +00:00
committed by GitHub
26 changed files with 1550 additions and 410 deletions

Binary file not shown. (×4)

View File

@@ -136,10 +136,34 @@ if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
error "Missing required parameters. This script should be called via the PatchMon web interface."
fi
# Parse architecture parameter (default to amd64)
ARCHITECTURE="${ARCHITECTURE:-amd64}"
if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" ]]; then
error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64"
# Auto-detect architecture if not explicitly set
if [[ -z "$ARCHITECTURE" ]]; then
arch_raw=$(uname -m 2>/dev/null || echo "unknown")
# Map architecture to supported values
case "$arch_raw" in
"x86_64")
ARCHITECTURE="amd64"
;;
"i386"|"i686")
ARCHITECTURE="386"
;;
"aarch64"|"arm64")
ARCHITECTURE="arm64"
;;
"armv7l"|"armv6l"|"arm")
ARCHITECTURE="arm"
;;
*)
warning "⚠️ Unknown architecture '$arch_raw', defaulting to amd64"
ARCHITECTURE="amd64"
;;
esac
fi
# Validate architecture
if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" && "$ARCHITECTURE" != "arm" ]]; then
error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm"
fi
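
Reviewer note: ARCHITECTURE is now purely an override; when it is unset, the script maps uname -m onto the four supported targets. A minimal sketch of exercising both paths (the local filename is a placeholder; in practice the script is served by the /install endpoint shown further down):

# Auto-detect (e.g. aarch64 maps to arm64):
bash patchmon-install.sh
# Explicit override, bypassing detection:
ARCHITECTURE=arm bash patchmon-install.sh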
# Check if --force flag is set (for bypassing broken packages)
@@ -234,7 +258,98 @@ install_apt_packages() {
fi
}
# Detect package manager and install jq and curl
# Function to check and install packages for yum/dnf
install_yum_dnf_packages() {
local pkg_manager="$1"
shift
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
if [[ "$pkg_manager" == "yum" ]]; then
yum install -y "${missing_packages[@]}"
else
dnf install -y "${missing_packages[@]}"
fi
}
# Function to check and install packages for zypper
install_zypper_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
zypper install -y "${missing_packages[@]}"
}
# Function to check and install packages for pacman
install_pacman_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
pacman -S --noconfirm "${missing_packages[@]}"
}
# Function to check and install packages for apk
install_apk_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
apk add --no-cache "${missing_packages[@]}"
}
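
Reviewer note: the four helpers above repeat one detect-then-install pattern. A condensed sketch of that pattern, assuming the script's own command_exists, info, and success helpers (illustration only, not part of the diff):

install_missing() {
local install_cmd="$1"
shift
local missing=()
for pkg in "$@"; do
command_exists "$pkg" || missing+=("$pkg")
done
if [ ${#missing[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing[*]}"
$install_cmd "${missing[@]}"
}
# Usage: install_missing "apk add --no-cache" jq curl bc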
# Detect package manager and install jq, curl, and bc
if command -v apt-get >/dev/null 2>&1; then
# Debian/Ubuntu
info "Detected apt-get (Debian/Ubuntu)"
@@ -260,31 +375,31 @@ elif command -v yum >/dev/null 2>&1; then
info "Detected yum (CentOS/RHEL 7)"
echo ""
info "Installing jq, curl, and bc..."
yum install -y jq curl bc
install_yum_dnf_packages yum jq curl bc
elif command -v dnf >/dev/null 2>&1; then
# CentOS/RHEL 8+/Fedora
info "Detected dnf (CentOS/RHEL 8+/Fedora)"
echo ""
info "Installing jq, curl, and bc..."
dnf install -y jq curl bc
install_yum_dnf_packages dnf jq curl bc
elif command -v zypper >/dev/null 2>&1; then
# openSUSE
info "Detected zypper (openSUSE)"
echo ""
info "Installing jq, curl, and bc..."
zypper install -y jq curl bc
install_zypper_packages jq curl bc
elif command -v pacman >/dev/null 2>&1; then
# Arch Linux
info "Detected pacman (Arch Linux)"
echo ""
info "Installing jq, curl, and bc..."
pacman -S --noconfirm jq curl bc
install_pacman_packages jq curl bc
elif command -v apk >/dev/null 2>&1; then
# Alpine Linux
info "Detected apk (Alpine Linux)"
echo ""
info "Installing jq, curl, and bc..."
apk add --no-cache jq curl bc
install_apk_packages jq curl bc
else
warning "Could not detect package manager. Please ensure 'jq', 'curl', and 'bc' are installed manually."
fi
@@ -311,6 +426,37 @@ else
mkdir -p /etc/patchmon
fi
# Check if agent is already configured and working (before we overwrite anything)
info "🔍 Checking if agent is already configured..."
if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
if [[ -f /usr/local/bin/patchmon-agent ]]; then
info "📋 Found existing agent configuration"
info "🧪 Testing existing configuration with ping..."
if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
success "✅ Agent is already configured and ping successful"
info "📋 Existing configuration is working - skipping installation"
info ""
info "If you want to reinstall, remove the configuration files first:"
info " sudo rm -f /etc/patchmon/config.yml /etc/patchmon/credentials.yml"
echo ""
exit 0
else
warning "⚠️ Agent configuration exists but ping failed"
warning "⚠️ Will move existing configuration and reinstall"
echo ""
fi
else
warning "⚠️ Configuration files exist but agent binary is missing"
warning "⚠️ Will move existing configuration and reinstall"
echo ""
fi
else
success "✅ Agent not yet configured - proceeding with installation"
echo ""
fi
# Step 2: Create configuration files
info "🔐 Creating configuration files..."
@@ -426,33 +572,6 @@ if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
fi
# Step 4: Test the configuration
# Check if this machine is already enrolled
info "🔍 Checking if machine is already enrolled..."
existing_check=$(curl $CURL_FLAGS -s -X POST \
-H "X-API-ID: $API_ID" \
-H "X-API-KEY: $API_KEY" \
-H "Content-Type: application/json" \
-d "{\"machine_id\": \"$MACHINE_ID\"}" \
"$PATCHMON_URL/api/v1/hosts/check-machine-id" \
-w "\n%{http_code}" 2>&1)
http_code=$(echo "$existing_check" | tail -n 1)
response_body=$(echo "$existing_check" | sed '$d')
if [[ "$http_code" == "200" ]]; then
already_enrolled=$(echo "$response_body" | jq -r '.exists' 2>/dev/null || echo "false")
if [[ "$already_enrolled" == "true" ]]; then
warning "⚠️ This machine is already enrolled in PatchMon"
info "Machine ID: $MACHINE_ID"
info "Existing host: $(echo "$response_body" | jq -r '.host.friendly_name' 2>/dev/null)"
info ""
info "The agent will be reinstalled/updated with existing credentials."
echo ""
else
success "✅ Machine not yet enrolled - proceeding with installation"
fi
fi
info "🧪 Testing API credentials and connectivity..."
if /usr/local/bin/patchmon-agent ping; then
success "✅ TEST: API credentials are valid and server is reachable"
@@ -460,15 +579,8 @@ else
error "❌ Failed to validate API credentials or reach server"
fi
# Step 5: Send initial data and setup systemd service
info "📊 Sending initial package data to server..."
if /usr/local/bin/patchmon-agent report; then
success "✅ UPDATE: Initial package data sent successfully"
else
warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent report"
fi
# Step 6: Setup systemd service for WebSocket connection
# Step 5: Setup systemd service for WebSocket connection
# Note: The service will automatically send an initial report on startup (see serve.go)
info "🔧 Setting up systemd service..."
# Stop and disable existing service if it exists

View File

@@ -230,6 +230,40 @@ while IFS= read -r line; do
info " ✓ Host enrolled successfully: $api_id"
# Check if agent is already installed and working
info " Checking if agent is already configured..."
config_check=$(timeout 10 pct exec "$vmid" -- bash -c "
if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
if [[ -f /usr/local/bin/patchmon-agent ]]; then
# Try to ping using existing configuration
if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
echo 'ping_success'
else
echo 'ping_failed'
fi
else
echo 'binary_missing'
fi
else
echo 'not_configured'
fi
" 2>/dev/null </dev/null || echo "error")
if [[ "$config_check" == "ping_success" ]]; then
info " ✓ Host already enrolled and agent ping successful - skipping"
((skipped_count++)) || true
echo ""
continue
elif [[ "$config_check" == "ping_failed" ]]; then
warn " ⚠ Agent configuration exists but ping failed - will reinstall"
elif [[ "$config_check" == "binary_missing" ]]; then
warn " ⚠ Config exists but agent binary missing - will reinstall"
elif [[ "$config_check" == "not_configured" ]]; then
info " Agent not yet configured - proceeding with installation"
else
warn " ⚠ Could not check agent status - proceeding with installation"
fi
# Ensure curl is installed in the container
info " Checking for curl in container..."
curl_check=$(timeout 10 pct exec "$vmid" -- bash -c "command -v curl >/dev/null 2>&1 && echo 'installed' || echo 'missing'" 2>/dev/null </dev/null || echo "error")
@@ -283,9 +317,10 @@ while IFS= read -r line; do
install_exit_code=0
# Download and execute in separate steps to avoid stdin issues with piping
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
cd /tmp
curl $CURL_FLAGS \
curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \
@@ -422,9 +457,10 @@ if [[ ${#dpkg_error_containers[@]} -gt 0 ]]; then
info " Retrying agent installation..."
install_exit_code=0
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
cd /tmp
curl $CURL_FLAGS \
curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \
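
Reviewer note on both hunks above: the fix defers expansion of CURL_FLAGS. Previously, curl $CURL_FLAGS was expanded by the Proxmox host while the bash -c string was being built; now the escaped \$CURL_FLAGS survives into the container, where --env supplies the value. A minimal illustration (vmid 101 and the -k flag are placeholders):

# Before: the host shell expands $CURL_FLAGS while building the command string.
pct exec 101 -- bash -c "curl $CURL_FLAGS -o /tmp/x \"$URL\""
# After: the container shell expands it from the environment passed via --env.
pct exec 101 --env CURL_FLAGS="-k" -- bash -c 'curl $CURL_FLAGS -o /tmp/x "$URL"'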

View File

@@ -0,0 +1,16 @@
-- CreateTable
CREATE TABLE "system_statistics" (
"id" TEXT NOT NULL,
"unique_packages_count" INTEGER NOT NULL,
"unique_security_count" INTEGER NOT NULL,
"total_packages" INTEGER NOT NULL,
"total_hosts" INTEGER NOT NULL,
"hosts_needing_updates" INTEGER NOT NULL,
"timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "system_statistics_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "system_statistics_timestamp_idx" ON "system_statistics"("timestamp");

View File

@@ -202,7 +202,7 @@ model update_history {
id String @id
host_id String
packages_count Int
security_count Int
total_packages Int?
payload_size_kb Float?
execution_time Float?
@@ -212,6 +212,18 @@ model update_history {
hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade)
}
model system_statistics {
id String @id
unique_packages_count Int
unique_security_count Int
total_packages Int
total_hosts Int
hosts_needing_updates Int
timestamp DateTime @default(now())
@@index([timestamp])
}
model users {
id String @id
username String @unique

View File

@@ -242,6 +242,30 @@ router.post(
},
);
// Trigger manual system statistics collection
router.post(
"/trigger/system-statistics",
authenticateToken,
async (_req, res) => {
try {
const job = await queueManager.triggerSystemStatistics();
res.json({
success: true,
data: {
jobId: job.id,
message: "System statistics collection triggered successfully",
},
});
} catch (error) {
console.error("Error triggering system statistics collection:", error);
res.status(500).json({
success: false,
error: "Failed to trigger system statistics collection",
});
}
},
);
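
A sketch of exercising the new trigger endpoint directly; the /api/v1/automation mount prefix and bearer-token auth are assumptions inferred from the frontend call in Automation.jsx further down:

curl -X POST \
-H "Authorization: Bearer $TOKEN" \
"$PATCHMON_URL/api/v1/automation/trigger/system-statistics"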
// Get queue health status
router.get("/health", authenticateToken, async (_req, res) => {
try {
@@ -300,6 +324,7 @@ router.get("/overview", authenticateToken, async (_req, res) => {
queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1),
queueManager.getRecentJobs(QUEUE_NAMES.SYSTEM_STATISTICS, 1),
]);
// Calculate overview metrics
@@ -309,21 +334,24 @@ router.get("/overview", authenticateToken, async (_req, res) => {
stats[QUEUE_NAMES.SESSION_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].delayed,
runningTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active +
stats[QUEUE_NAMES.SESSION_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].active,
failedTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed +
stats[QUEUE_NAMES.SESSION_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].failed,
totalAutomations: Object.values(stats).reduce((sum, queueStats) => {
return (
@@ -435,6 +463,22 @@ router.get("/overview", authenticateToken, async (_req, res) => {
: "Never run",
stats: stats[QUEUE_NAMES.AGENT_COMMANDS],
},
{
name: "System Statistics Collection",
queue: QUEUE_NAMES.SYSTEM_STATISTICS,
description: "Collects aggregated system-wide package statistics",
schedule: "Every 30 minutes",
lastRun: recentJobs[6][0]?.finishedOn
? new Date(recentJobs[6][0].finishedOn).toLocaleString()
: "Never",
lastRunTimestamp: recentJobs[6][0]?.finishedOn || 0,
status: recentJobs[6][0]?.failedReason
? "Failed"
: recentJobs[6][0]
? "Success"
: "Never run",
stats: stats[QUEUE_NAMES.SYSTEM_STATISTICS],
},
].sort((a, b) => {
// Sort by last run timestamp (most recent first)
// If both have never run (timestamp 0), maintain original order

View File

@@ -564,174 +564,216 @@ router.get(
const startDate = new Date();
startDate.setDate(endDate.getDate() - daysInt);
// Build where clause
const whereClause = {
timestamp: {
gte: startDate,
lte: endDate,
},
};
// Add host filter if specified
if (hostId && hostId !== "all" && hostId !== "undefined") {
whereClause.host_id = hostId;
}
// Get all update history records in the date range
const trendsData = await prisma.update_history.findMany({
where: whereClause,
select: {
timestamp: true,
packages_count: true,
security_count: true,
total_packages: true,
host_id: true,
status: true,
},
orderBy: {
timestamp: "asc",
},
});
// Enhanced data validation and processing
const processedData = trendsData
.filter((record) => {
// Enhanced validation
return (
record.total_packages !== null &&
record.total_packages >= 0 &&
record.packages_count >= 0 &&
record.security_count >= 0 &&
record.security_count <= record.packages_count && // Security can't exceed outdated
record.status === "success"
); // Only include successful reports
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
if (daysInt <= 1) {
// For hourly view, group by hour only (not minutes)
timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
}
return {
timeKey,
total_packages: record.total_packages,
packages_count: record.packages_count || 0,
security_count: record.security_count || 0,
host_id: record.host_id,
timestamp: record.timestamp,
};
});
// Determine if we need aggregation based on host filter
const needsAggregation =
!hostId || hostId === "all" || hostId === "undefined";
let trendsData;
if (needsAggregation) {
// For "All Hosts" mode, use system_statistics table
trendsData = await prisma.system_statistics.findMany({
where: {
timestamp: {
gte: startDate,
lte: endDate,
},
},
select: {
timestamp: true,
unique_packages_count: true,
unique_security_count: true,
total_packages: true,
total_hosts: true,
hosts_needing_updates: true,
},
orderBy: {
timestamp: "asc",
},
});
} else {
// For individual host, use update_history table
trendsData = await prisma.update_history.findMany({
where: {
host_id: hostId,
timestamp: {
gte: startDate,
lte: endDate,
},
},
select: {
timestamp: true,
packages_count: true,
security_count: true,
total_packages: true,
host_id: true,
status: true,
},
orderBy: {
timestamp: "asc",
},
});
}
// Process data based on source
let processedData;
let aggregatedArray;
if (needsAggregation) {
// For "All Hosts" mode, we need to calculate the actual total packages differently
// Instead of aggregating historical data (which is per-host), we'll use the current total
// and show that as a flat line, since total packages don't change much over time
// For "All Hosts" mode, data comes from system_statistics table
// Already aggregated, just need to format it
processedData = trendsData
.filter((record) => {
// Enhanced validation
return (
record.total_packages !== null &&
record.total_packages >= 0 &&
record.unique_packages_count >= 0 &&
record.unique_security_count >= 0 &&
record.unique_security_count <= record.unique_packages_count
);
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
// Get the current total packages count (unique packages across all hosts)
const currentTotalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
if (daysInt <= 1) {
// For "Last 24 hours", use full timestamp for each data point
// This allows plotting all individual data points
timeKey = date.toISOString(); // Full ISO timestamp
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
}
// Aggregate data by timeKey when looking at "All Hosts" or no specific host
const aggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: currentTotalPackages, // Use current total packages
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set(),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
return {
timeKey,
total_packages: record.total_packages,
packages_count: record.unique_packages_count,
security_count: record.unique_security_count,
timestamp: record.timestamp,
};
}
});
// For outdated and security packages: SUM (these represent counts across hosts)
acc[item.timeKey].packages_count += item.packages_count;
acc[item.timeKey].security_count += item.security_count;
if (daysInt <= 1) {
// For "Last 24 hours", use all individual data points without grouping
// Sort by timestamp
aggregatedArray = processedData.sort(
(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
);
} else {
// For longer periods, group by timeKey and take the latest value for each period
const aggregatedData = processedData.reduce((acc, item) => {
if (
!acc[item.timeKey] ||
item.timestamp > acc[item.timeKey].timestamp
) {
acc[item.timeKey] = item;
}
return acc;
}, {});
acc[item.timeKey].record_count += 1;
acc[item.timeKey].host_ids.add(item.host_id);
// Track timestamp range
if (item.timestamp < acc[item.timeKey].min_timestamp) {
acc[item.timeKey].min_timestamp = item.timestamp;
}
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].max_timestamp = item.timestamp;
}
return acc;
}, {});
// Convert to array and add metadata
aggregatedArray = Object.values(aggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
// Convert to array and sort
aggregatedArray = Object.values(aggregatedData).sort((a, b) =>
a.timeKey.localeCompare(b.timeKey),
);
}
} else {
// For specific host, show individual data points without aggregation
// But still group by timeKey to handle multiple reports from same host in same time period
const hostAggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: 0,
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set([item.host_id]),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
// For individual host, data comes from update_history table
processedData = trendsData
.filter((record) => {
// Enhanced validation
return (
record.total_packages !== null &&
record.total_packages >= 0 &&
record.packages_count >= 0 &&
record.security_count >= 0 &&
record.security_count <= record.packages_count &&
record.status === "success"
);
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
if (daysInt <= 1) {
// For "Last 24 hours", use full timestamp for each data point
// This allows plotting all individual data points
timeKey = date.toISOString(); // Full ISO timestamp
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
}
return {
timeKey,
total_packages: record.total_packages,
packages_count: record.packages_count || 0,
security_count: record.security_count || 0,
host_id: record.host_id,
timestamp: record.timestamp,
};
}
});
// For same host, take the latest values (not sum)
// This handles cases where a host reports multiple times in the same time period
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].total_packages = item.total_packages;
acc[item.timeKey].packages_count = item.packages_count;
acc[item.timeKey].security_count = item.security_count;
acc[item.timeKey].max_timestamp = item.timestamp;
}
if (daysInt <= 1) {
// For "Last 24 hours", use all individual data points without grouping
// Sort by timestamp
aggregatedArray = processedData.sort(
(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
);
} else {
// For longer periods, group by timeKey to handle multiple reports from same host in same time period
const hostAggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: 0,
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set([item.host_id]),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
};
}
acc[item.timeKey].record_count += 1;
// For same host, take the latest values (not sum)
// This handles cases where a host reports multiple times in the same time period
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].total_packages = item.total_packages;
acc[item.timeKey].packages_count = item.packages_count;
acc[item.timeKey].security_count = item.security_count;
acc[item.timeKey].max_timestamp = item.timestamp;
}
return acc;
}, {});
acc[item.timeKey].record_count += 1;
// Convert to array
aggregatedArray = Object.values(hostAggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
return acc;
}, {});
// Convert to array
aggregatedArray = Object.values(hostAggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
}
}
// Handle sparse data by filling missing time periods
const fillMissingPeriods = (data, daysInt) => {
if (data.length === 0) {
return [];
}
// For "Last 24 hours", return data as-is without filling gaps
// This allows plotting all individual data points
if (daysInt <= 1) {
return data;
}
const filledData = [];
const startDate = new Date();
startDate.setDate(startDate.getDate() - daysInt);
@@ -741,50 +783,58 @@ router.get(
const endDate = new Date();
const currentDate = new Date(startDate);
// Find the last known values for interpolation
// Sort data by timeKey to get chronological order
const sortedData = [...data].sort((a, b) =>
a.timeKey.localeCompare(b.timeKey),
);
// Find the first actual data point (don't fill before this)
const firstDataPoint = sortedData[0];
const firstDataTimeKey = firstDataPoint?.timeKey;
// Track last known values as we iterate forward
let lastKnownValues = null;
if (data.length > 0) {
lastKnownValues = {
total_packages: data[0].total_packages,
packages_count: data[0].packages_count,
security_count: data[0].security_count,
};
}
let hasSeenFirstDataPoint = false;
while (currentDate <= endDate) {
let timeKey;
if (daysInt <= 1) {
timeKey = currentDate.toISOString().substring(0, 13); // Hourly
currentDate.setHours(currentDate.getHours() + 1);
} else {
timeKey = currentDate.toISOString().split("T")[0]; // Daily
currentDate.setDate(currentDate.getDate() + 1);
// For daily view, group by day
timeKey = currentDate.toISOString().split("T")[0]; // YYYY-MM-DD
currentDate.setDate(currentDate.getDate() + 1);
// Skip periods before the first actual data point
if (firstDataTimeKey && timeKey < firstDataTimeKey) {
continue;
}
if (dataMap.has(timeKey)) {
const item = dataMap.get(timeKey);
filledData.push(item);
// Update last known values
// Update last known values with actual data
lastKnownValues = {
total_packages: item.total_packages,
packages_count: item.packages_count,
security_count: item.security_count,
total_packages: item.total_packages || 0,
packages_count: item.packages_count || 0,
security_count: item.security_count || 0,
};
hasSeenFirstDataPoint = true;
} else {
// For missing periods, use the last known values (interpolation)
// This creates a continuous line instead of gaps
filledData.push({
timeKey,
total_packages: lastKnownValues?.total_packages || 0,
packages_count: lastKnownValues?.packages_count || 0,
security_count: lastKnownValues?.security_count || 0,
record_count: 0,
host_count: 0,
host_ids: [],
min_timestamp: null,
max_timestamp: null,
isInterpolated: true, // Mark as interpolated for debugging
});
// For missing periods AFTER the first data point, use forward-fill
// Only fill if we have a last known value and we've seen the first data point
if (lastKnownValues !== null && hasSeenFirstDataPoint) {
filledData.push({
timeKey,
total_packages: lastKnownValues.total_packages,
packages_count: lastKnownValues.packages_count,
security_count: lastKnownValues.security_count,
record_count: 0,
host_count: 0,
host_ids: [],
min_timestamp: null,
max_timestamp: null,
isInterpolated: true, // Mark as interpolated for debugging
});
}
// If we haven't seen the first data point yet, skip this period
}
}
@@ -810,7 +860,7 @@ router.get(
// Get current package state for offline fallback
let currentPackageState = null;
if (hostId && hostId !== "all" && hostId !== "undefined") {
// Get current package counts for specific host
// For individual host, get current package counts from host_packages
const currentState = await prisma.host_packages.aggregate({
where: {
host_id: hostId,
@@ -841,34 +891,64 @@ router.get(
security_count: securityCount,
};
} else {
// Get current package counts for all hosts
// Total packages = count of unique packages installed on at least one host
const totalPackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
// For "All Hosts" mode, use the latest system_statistics record if available
// Otherwise calculate from database
const latestStats = await prisma.system_statistics.findFirst({
orderBy: {
timestamp: "desc",
},
select: {
total_packages: true,
unique_packages_count: true,
unique_security_count: true,
timestamp: true,
},
});
if (latestStats) {
// Use latest system statistics (collected by scheduled job)
currentPackageState = {
total_packages: latestStats.total_packages,
packages_count: latestStats.unique_packages_count,
security_count: latestStats.unique_security_count,
};
} else {
// Fallback: calculate from database if no statistics collected yet
const totalPackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
},
});
});
// Get counts for boolean fields separately
const outdatedCount = await prisma.host_packages.count({
where: {
needs_update: true,
},
});
const uniqueOutdatedCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
},
},
},
});
const securityCount = await prisma.host_packages.count({
where: {
is_security_update: true,
},
});
const uniqueSecurityCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
is_security_update: true,
},
},
},
});
currentPackageState = {
total_packages: totalPackagesCount,
packages_count: outdatedCount,
security_count: securityCount,
};
currentPackageState = {
total_packages: totalPackagesCount,
packages_count: uniqueOutdatedCount,
security_count: uniqueSecurityCount,
};
}
}
// Format data for chart
@@ -923,6 +1003,11 @@ router.get(
chartData.datasets[2].data.push(item.security_count);
});
// Replace the last label with "Now" to indicate current state
if (chartData.labels.length > 0) {
chartData.labels[chartData.labels.length - 1] = "Now";
}
// Calculate data quality metrics
const dataQuality = {
totalRecords: trendsData.length,

View File

@@ -11,10 +11,16 @@ const {
requireManageSettings,
} = require("../middleware/permissions");
const { queueManager, QUEUE_NAMES } = require("../services/automation");
const { pushIntegrationToggle, isConnected } = require("../services/agentWs");
const agentVersionService = require("../services/agentVersionService");
const router = express.Router();
const prisma = getPrismaClient();
// In-memory cache for integration states (api_id -> { integration_name -> enabled })
// This stores the last known state from successful toggles
const integrationStateCache = new Map();
// Secure endpoint to download the agent script/binary (requires API authentication)
router.get("/agent/download", async (req, res) => {
try {
@@ -128,9 +134,6 @@ router.get("/agent/version", async (req, res) => {
try {
const fs = require("node:fs");
const path = require("node:path");
const { exec } = require("node:child_process");
const { promisify } = require("node:util");
const execAsync = promisify(exec);
// Get architecture parameter (default to amd64 for Go agents)
const architecture = req.query.arch || "amd64";
@@ -165,53 +168,108 @@ router.get("/agent/version", async (req, res) => {
minServerVersion: null,
});
} else {
// Go agent version check (binary)
const binaryName = `patchmon-agent-linux-${architecture}`;
const binaryPath = path.join(__dirname, "../../../agents", binaryName);
// Go agent version check
// Detect server architecture and map to Go architecture names
const os = require("node:os");
const { exec } = require("node:child_process");
const { promisify } = require("node:util");
const execAsync = promisify(exec);
if (!fs.existsSync(binaryPath)) {
return res.status(404).json({
error: `Go agent binary not found for architecture: ${architecture}`,
});
const serverArch = os.arch();
// Map Node.js architecture to Go architecture names
const archMap = {
x64: "amd64",
ia32: "386",
arm64: "arm64",
arm: "arm",
};
const serverGoArch = archMap[serverArch] || serverArch;
// If requested architecture matches server architecture, execute the binary
if (architecture === serverGoArch) {
const binaryName = `patchmon-agent-linux-${architecture}`;
const binaryPath = path.join(__dirname, "../../../agents", binaryName);
if (!fs.existsSync(binaryPath)) {
// Binary doesn't exist, fall back to GitHub
console.log(`Binary ${binaryName} not found, falling back to GitHub`);
} else {
// Execute the binary to get its version
try {
const { stdout } = await execAsync(`${binaryPath} --help`, {
timeout: 10000,
});
// Parse version from help output (e.g., "PatchMon Agent v1.3.1")
const versionMatch = stdout.match(
/PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
);
if (versionMatch) {
const serverVersion = versionMatch[1];
const agentVersion = req.query.currentVersion || serverVersion;
// Simple version comparison (assuming semantic versioning)
const hasUpdate = agentVersion !== serverVersion;
return res.json({
currentVersion: agentVersion,
latestVersion: serverVersion,
hasUpdate: hasUpdate,
downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
releaseNotes: `PatchMon Agent v${serverVersion}`,
minServerVersion: null,
architecture: architecture,
agentType: "go",
});
}
} catch (execError) {
// Execution failed, fall back to GitHub
console.log(
`Failed to execute binary ${binaryName}: ${execError.message}, falling back to GitHub`,
);
}
}
}
// Execute the binary to get its version
// Fall back to GitHub if architecture doesn't match or binary execution failed
try {
const { stdout } = await execAsync(`${binaryPath} --help`, {
timeout: 10000,
});
const versionInfo = await agentVersionService.getVersionInfo();
const latestVersion = versionInfo.latestVersion;
const agentVersion =
req.query.currentVersion || latestVersion || "unknown";
// Parse version from help output (e.g., "PatchMon Agent v1.3.1")
const versionMatch = stdout.match(
/PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
);
if (!versionMatch) {
return res.status(500).json({
error: "Could not extract version from agent binary",
if (!latestVersion) {
return res.status(503).json({
error: "Unable to determine latest version from GitHub releases",
currentVersion: agentVersion,
latestVersion: null,
hasUpdate: false,
});
}
const serverVersion = versionMatch[1];
const agentVersion = req.query.currentVersion || serverVersion;
// Simple version comparison (assuming semantic versioning)
const hasUpdate = agentVersion !== serverVersion;
const hasUpdate =
agentVersion !== latestVersion && latestVersion !== null;
res.json({
currentVersion: agentVersion,
latestVersion: serverVersion,
latestVersion: latestVersion,
hasUpdate: hasUpdate,
downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
releaseNotes: `PatchMon Agent v${serverVersion}`,
releaseNotes: `PatchMon Agent v${latestVersion}`,
minServerVersion: null,
architecture: architecture,
agentType: "go",
});
} catch (execError) {
console.error("Failed to execute agent binary:", execError.message);
} catch (serviceError) {
console.error(
"Failed to get version from agentVersionService:",
serviceError.message,
);
return res.status(500).json({
error: "Failed to get version from agent binary",
error: "Failed to get agent version from service",
details: serviceError.message,
});
}
}
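
A sketch of the version check as an agent might issue it; the /api/v1/hosts prefix matches the downloadUrl the handler returns, while the API headers are included defensively since this excerpt does not show the route's auth requirements:

curl $CURL_FLAGS \
-H "X-API-ID: $API_ID" \
-H "X-API-KEY: $API_KEY" \
"$PATCHMON_URL/api/v1/hosts/agent/version?arch=arm64&currentVersion=1.3.2"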
@@ -1616,10 +1674,14 @@ router.get("/install", async (req, res) => {
// Check for --force parameter
const forceInstall = req.query.force === "true" || req.query.force === "1";
// Get architecture parameter (default to amd64)
const architecture = req.query.arch || "amd64";
// Get architecture parameter (only set if explicitly provided, otherwise let script auto-detect)
const architecture = req.query.arch;
// Inject the API credentials, server URL, curl flags, SSL verify flag, force flag, and architecture into the script
// Only set ARCHITECTURE if explicitly provided, otherwise let the script auto-detect
const archExport = architecture
? `export ARCHITECTURE="${architecture}"\n`
: "";
const envVars = `#!/bin/bash
export PATCHMON_URL="${serverUrl}"
export API_ID="${host.api_id}"
@@ -1627,8 +1689,7 @@ export API_KEY="${host.api_key}"
export CURL_FLAGS="${curlFlags}"
export SKIP_SSL_VERIFY="${skipSSLVerify}"
export FORCE_INSTALL="${forceInstall ? "true" : "false"}"
export ARCHITECTURE="${architecture}"
${archExport}
`;
// Remove the shebang from the original script and prepend our env vars
@@ -2103,4 +2164,137 @@ router.patch(
},
);
// Get integration status for a host
router.get(
"/:hostId/integrations",
authenticateToken,
requireManageHosts,
async (req, res) => {
try {
const { hostId } = req.params;
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
const connected = isConnected(host.api_id);
// Get integration states from cache (or defaults if not cached)
// Default: all integrations are disabled
const cachedState = integrationStateCache.get(host.api_id) || {};
const integrations = {
docker: cachedState.docker || false, // Default: disabled
// Future integrations can be added here
};
res.json({
success: true,
data: {
integrations,
connected,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Get integration status error:", error);
res.status(500).json({ error: "Failed to get integration status" });
}
},
);
// Toggle integration status for a host
router.post(
"/:hostId/integrations/:integrationName/toggle",
authenticateToken,
requireManageHosts,
[body("enabled").isBoolean().withMessage("Enabled status must be a boolean")],
async (req, res) => {
try {
const errors = validationResult(req);
if (!errors.isEmpty()) {
return res.status(400).json({ errors: errors.array() });
}
const { hostId, integrationName } = req.params;
const { enabled } = req.body;
// Validate integration name
const validIntegrations = ["docker"]; // Add more as they're implemented
if (!validIntegrations.includes(integrationName)) {
return res.status(400).json({
error: "Invalid integration name",
validIntegrations,
});
}
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
if (!isConnected(host.api_id)) {
return res.status(503).json({
error: "Agent is not connected",
message:
"The agent must be connected via WebSocket to toggle integrations",
});
}
// Send WebSocket message to agent
const success = pushIntegrationToggle(
host.api_id,
integrationName,
enabled,
);
if (!success) {
return res.status(503).json({
error: "Failed to send integration toggle",
message: "Agent connection may have been lost",
});
}
// Update cache with new state
if (!integrationStateCache.has(host.api_id)) {
integrationStateCache.set(host.api_id, {});
}
integrationStateCache.get(host.api_id)[integrationName] = enabled;
res.json({
success: true,
message: `Integration ${integrationName} ${enabled ? "enabled" : "disabled"} successfully`,
data: {
integration: integrationName,
enabled,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Toggle integration error:", error);
res.status(500).json({ error: "Failed to toggle integration" });
}
},
);
module.exports = router;
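
A sketch of driving the integration endpoints above from a shell; the /api/v1/hosts prefix and bearer-token auth are assumptions consistent with the rest of this router:

# Read current integration state and agent connectivity:
curl -H "Authorization: Bearer $TOKEN" \
"$PATCHMON_URL/api/v1/hosts/$HOST_ID/integrations"
# Toggle Docker monitoring on (returns 503 if the agent's WebSocket is down):
curl -X POST \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d '{"enabled": true}' \
"$PATCHMON_URL/api/v1/hosts/$HOST_ID/integrations/docker/toggle"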

View File

@@ -60,9 +60,14 @@ router.post(
authenticateToken,
[
body("token")
.notEmpty()
.withMessage("Token is required")
.isString()
.withMessage("Token must be a string")
.isLength({ min: 6, max: 6 })
.withMessage("Token must be 6 digits"),
body("token").isNumeric().withMessage("Token must contain only numbers"),
.withMessage("Token must be exactly 6 digits")
.matches(/^\d{6}$/)
.withMessage("Token must contain only numbers"),
],
async (req, res) => {
try {
@@ -71,7 +76,11 @@ router.post(
return res.status(400).json({ errors: errors.array() });
}
const { token } = req.body;
// Ensure token is a string (convert if needed)
let { token } = req.body;
if (typeof token !== "string") {
token = String(token);
}
const userId = req.user.id;
// Get user's TFA secret

View File

@@ -49,12 +49,12 @@ function init(server, prismaClient) {
// Accept the WebSocket connection for Bull Board
wss.handleUpgrade(request, socket, head, (ws) => {
ws.on("message", (message) => {
// Echo back for Bull Board WebSocket
try {
ws.send(message);
} catch (_err) {
// Ignore send errors (connection may be closed)
}
});
ws.on("error", (err) => {
@@ -255,6 +255,29 @@ function pushUpdateAgent(apiId) {
safeSend(ws, JSON.stringify({ type: "update_agent" }));
}
function pushIntegrationToggle(apiId, integrationName, enabled) {
const ws = apiIdToSocket.get(apiId);
if (ws && ws.readyState === WebSocket.OPEN) {
safeSend(
ws,
JSON.stringify({
type: "integration_toggle",
integration: integrationName,
enabled: enabled,
}),
);
console.log(
`📤 Pushed integration toggle to agent ${apiId}: ${integrationName} = ${enabled}`,
);
return true;
} else {
console.log(
`⚠️ Agent ${apiId} not connected; cannot push integration toggle. Edit config.yml manually.`,
);
return false;
}
}
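
For reference, the frame the agent receives on its WebSocket, with the shape taken directly from the JSON.stringify call above:

{"type":"integration_toggle","integration":"docker","enabled":true}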
function getConnectionByApiId(apiId) {
return apiIdToSocket.get(apiId);
}
@@ -414,6 +437,7 @@ module.exports = {
pushReportNow,
pushSettingsUpdate,
pushUpdateAgent,
pushIntegrationToggle,
pushUpdateNotification,
pushUpdateNotificationToAll,
// Expose read-only view of connected agents

View File

@@ -13,6 +13,7 @@ const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
const DockerInventoryCleanup = require("./dockerInventoryCleanup");
const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
const MetricsReporting = require("./metricsReporting");
const SystemStatistics = require("./systemStatistics");
// Queue names
const QUEUE_NAMES = {
@@ -23,6 +24,7 @@ const QUEUE_NAMES = {
DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
METRICS_REPORTING: "metrics-reporting",
SYSTEM_STATISTICS: "system-statistics",
AGENT_COMMANDS: "agent-commands",
};
@@ -106,6 +108,9 @@ class QueueManager {
this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
this,
);
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS] = new SystemStatistics(
this,
);
console.log("✅ All automation classes initialized");
}
@@ -191,6 +196,15 @@ class QueueManager {
workerOptions,
);
// System Statistics Worker
this.workers[QUEUE_NAMES.SYSTEM_STATISTICS] = new Worker(
QUEUE_NAMES.SYSTEM_STATISTICS,
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].process.bind(
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS],
),
workerOptions,
);
// Agent Commands Worker
this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker(
QUEUE_NAMES.AGENT_COMMANDS,
@@ -323,6 +337,7 @@ class QueueManager {
await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
await this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].schedule();
}
/**
@@ -358,6 +373,10 @@ class QueueManager {
].triggerManual();
}
async triggerSystemStatistics() {
return this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].triggerManual();
}
async triggerMetricsReporting() {
return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
}

View File

@@ -0,0 +1,140 @@
const { prisma } = require("./shared/prisma");
const { v4: uuidv4 } = require("uuid");
/**
* System Statistics Collection Automation
* Collects aggregated system-wide statistics every 30 minutes
* for use in package trends charts
*/
class SystemStatistics {
constructor(queueManager) {
this.queueManager = queueManager;
this.queueName = "system-statistics";
}
/**
* Process system statistics collection job
*/
async process(_job) {
const startTime = Date.now();
console.log("📊 Starting system statistics collection...");
try {
// Count unique packages that need an update on at least one host
const uniquePackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
},
},
},
});
const uniqueSecurityCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
is_security_update: true,
},
},
},
});
// Calculate total unique packages installed on at least one host
const totalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Calculate total hosts
const totalHosts = await prisma.hosts.count({
where: {
status: "active",
},
});
// Calculate hosts needing updates (distinct hosts with packages needing updates)
const hostsNeedingUpdates = await prisma.hosts.count({
where: {
status: "active",
host_packages: {
some: {
needs_update: true,
},
},
},
});
// Store statistics in database
await prisma.system_statistics.create({
data: {
id: uuidv4(),
unique_packages_count: uniquePackagesCount,
unique_security_count: uniqueSecurityCount,
total_packages: totalPackages,
total_hosts: totalHosts,
hosts_needing_updates: hostsNeedingUpdates,
timestamp: new Date(),
},
});
const executionTime = Date.now() - startTime;
console.log(
`✅ System statistics collection completed in ${executionTime}ms - Unique packages: ${uniquePackagesCount}, Security: ${uniqueSecurityCount}, Total hosts: ${totalHosts}`,
);
return {
success: true,
uniquePackagesCount,
uniqueSecurityCount,
totalPackages,
totalHosts,
hostsNeedingUpdates,
executionTime,
};
} catch (error) {
const executionTime = Date.now() - startTime;
console.error(
`❌ System statistics collection failed after ${executionTime}ms:`,
error.message,
);
throw error;
}
}
/**
* Schedule recurring system statistics collection (every 30 minutes)
*/
async schedule() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics",
{},
{
repeat: { pattern: "*/30 * * * *" }, // Every 30 minutes
jobId: "system-statistics-recurring",
},
);
console.log("✅ System statistics collection scheduled (every 30 minutes)");
return job;
}
/**
* Trigger manual system statistics collection
*/
async triggerManual() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics-manual",
{},
{ priority: 1 },
);
console.log("✅ Manual system statistics collection triggered");
return job;
}
}
module.exports = SystemStatistics;

View File

@@ -65,7 +65,7 @@ function parse_date(date_string, fallback = null) {
if (!date_string) {
return fallback || get_current_time();
}
try {
const date = new Date(date_string);
if (Number.isNaN(date.getTime())) {

View File

@@ -1,5 +1,5 @@
{
"$schema": "https://biomejs.dev/schemas/2.3.0/schema.json",
"$schema": "https://biomejs.dev/schemas/2.3.4/schema.json",
"vcs": {
"enabled": true,
"clientKind": "git",

View File

@@ -120,7 +120,6 @@ const Layout = ({ children }) => {
name: "Automation",
href: "/automation",
icon: RefreshCw,
new: true,
});
if (canViewReports()) {

View File

@@ -196,6 +196,25 @@ const Automation = () => {
year: "numeric",
});
}
if (schedule === "Every 30 minutes") {
const now = new Date();
const nextRun = new Date(now);
// Round up to the next 30-minute mark
const minutes = now.getMinutes();
if (minutes < 30) {
nextRun.setMinutes(30, 0, 0);
} else {
nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
}
return nextRun.toLocaleString([], {
hour12: true,
hour: "numeric",
minute: "2-digit",
day: "numeric",
month: "numeric",
year: "numeric",
});
}
return "Unknown";
};
@@ -236,6 +255,18 @@ const Automation = () => {
nextHour.setHours(nextHour.getHours() + 1, 0, 0, 0);
return nextHour.getTime();
}
if (schedule === "Every 30 minutes") {
const now = new Date();
const nextRun = new Date(now);
// Round up to the next 30-minute mark
const minutes = now.getMinutes();
if (minutes < 30) {
nextRun.setMinutes(30, 0, 0);
} else {
nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
}
return nextRun.getTime();
}
return Number.MAX_SAFE_INTEGER; // Unknown schedules go to bottom
};
@@ -294,6 +325,8 @@ const Automation = () => {
endpoint = "/automation/trigger/docker-inventory-cleanup";
} else if (jobType === "agent-collection") {
endpoint = "/automation/trigger/agent-collection";
} else if (jobType === "system-statistics") {
endpoint = "/automation/trigger/system-statistics";
}
const _response = await api.post(endpoint, data);
@@ -615,6 +648,10 @@ const Automation = () => {
automation.queue.includes("agent-commands")
) {
triggerManualJob("agent-collection");
} else if (
automation.queue.includes("system-statistics")
) {
triggerManualJob("system-statistics");
}
}}
className="inline-flex items-center justify-center w-6 h-6 border border-transparent rounded text-white bg-green-600 hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-green-500 transition-colors duration-200"

View File

@@ -55,6 +55,8 @@ const Dashboard = () => {
const [cardPreferences, setCardPreferences] = useState([]);
const [packageTrendsPeriod, setPackageTrendsPeriod] = useState("1"); // days
const [packageTrendsHost, setPackageTrendsHost] = useState("all"); // host filter
const [systemStatsJobId, setSystemStatsJobId] = useState(null); // Track job ID for system statistics
const [isTriggeringJob, setIsTriggeringJob] = useState(false);
const navigate = useNavigate();
const { isDark } = useTheme();
const { user } = useAuth();
@@ -772,56 +774,108 @@ const Dashboard = () => {
<h3 className="text-lg font-medium text-secondary-900 dark:text-white">
Package Trends Over Time
</h3>
<div className="flex items-center gap-3">
{/* Refresh Button */}
<button
type="button"
onClick={() => refetchPackageTrends()}
disabled={packageTrendsFetching}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white hover:bg-secondary-50 dark:hover:bg-secondary-700 focus:ring-2 focus:ring-primary-500 focus:border-primary-500 disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2"
title="Refresh data"
>
<RefreshCw
className={`h-4 w-4 ${packageTrendsFetching ? "animate-spin" : ""}`}
/>
Refresh
</button>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-3">
{/* Refresh Button */}
<button
type="button"
onClick={async () => {
if (packageTrendsHost === "all") {
// For "All Hosts", trigger system statistics collection job
setIsTriggeringJob(true);
try {
const response =
await dashboardAPI.triggerSystemStatistics();
if (response.data?.data?.jobId) {
setSystemStatsJobId(response.data.data.jobId);
// Wait a moment for the job to complete, then refetch
setTimeout(() => {
refetchPackageTrends();
}, 2000);
// Clear the job ID message after 2 seconds
setTimeout(() => {
setSystemStatsJobId(null);
}, 2000);
}
} catch (error) {
console.error(
"Failed to trigger system statistics:",
error,
);
// Still refetch data even if job trigger fails
refetchPackageTrends();
} finally {
setIsTriggeringJob(false);
}
} else {
// For individual host, just refetch the data
refetchPackageTrends();
}
}}
disabled={packageTrendsFetching || isTriggeringJob}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white hover:bg-secondary-50 dark:hover:bg-secondary-700 focus:ring-2 focus:ring-primary-500 focus:border-primary-500 disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2"
title={
packageTrendsHost === "all"
? "Trigger system statistics collection"
: "Refresh data"
}
>
<RefreshCw
className={`h-4 w-4 ${
packageTrendsFetching || isTriggeringJob
? "animate-spin"
: ""
}`}
/>
Refresh
</button>
{/* Period Selector */}
<select
value={packageTrendsPeriod}
onChange={(e) => setPackageTrendsPeriod(e.target.value)}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
>
<option value="1">Last 24 hours</option>
<option value="7">Last 7 days</option>
<option value="30">Last 30 days</option>
<option value="90">Last 90 days</option>
<option value="180">Last 6 months</option>
<option value="365">Last year</option>
</select>
{/* Host Selector */}
<select
value={packageTrendsHost}
onChange={(e) => setPackageTrendsHost(e.target.value)}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
>
<option value="all">All Hosts</option>
{packageTrendsData?.hosts?.length > 0 ? (
packageTrendsData.hosts.map((host) => (
<option key={host.id} value={host.id}>
{host.friendly_name || host.hostname}
{/* Host Selector */}
<select
value={packageTrendsHost}
onChange={(e) => {
setPackageTrendsHost(e.target.value);
// Clear job ID message when host selection changes
setSystemStatsJobId(null);
}}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
>
<option value="all">All Hosts</option>
{packageTrendsData?.hosts?.length > 0 ? (
packageTrendsData.hosts.map((host) => (
<option key={host.id} value={host.id}>
{host.friendly_name || host.hostname}
</option>
))
) : (
<option disabled>
{packageTrendsLoading
? "Loading hosts..."
: "No hosts available"}
</option>
))
) : (
<option disabled>
{packageTrendsLoading
? "Loading hosts..."
: "No hosts available"}
</option>
)}
</select>
)}
</select>
</div>
{/* Job ID Message */}
{systemStatsJobId && packageTrendsHost === "all" && (
<p className="text-xs text-secondary-600 dark:text-secondary-400 ml-1">
Ran collection job #{systemStatsJobId}
</p>
)}
</div>
</div>
@@ -1167,13 +1221,40 @@ const Dashboard = () => {
title: (context) => {
const label = context[0].label;
// Handle "Now" label
if (label === "Now") {
return "Now";
}
// Handle empty or invalid labels
if (!label || typeof label !== "string") {
return "Unknown Date";
}
// Check if it's a full ISO timestamp (for "Last 24 hours")
// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
if (label.includes("T") && label.includes(":")) {
try {
const date = new Date(label);
// Check if date is valid
if (Number.isNaN(date.getTime())) {
return label; // Return original label if date is invalid
}
// Format full ISO timestamp with date and time
return date.toLocaleDateString("en-US", {
month: "short",
day: "numeric",
hour: "numeric",
minute: "2-digit",
hour12: true,
});
} catch (_error) {
return label; // Return original label if parsing fails
}
}
// Format hourly labels (e.g., "2025-10-07T14" -> "Oct 7, 2:00 PM")
if (label.includes("T")) {
if (label.includes("T") && !label.includes(":")) {
try {
const date = new Date(`${label}:00:00`);
// Check if date is valid
@@ -1233,13 +1314,41 @@ const Dashboard = () => {
callback: function (value, _index, _ticks) {
const label = this.getLabelForValue(value);
// Handle "Now" label
if (label === "Now") {
return "Now";
}
// Handle empty or invalid labels
if (!label || typeof label !== "string") {
return "Unknown";
}
// Check if it's a full ISO timestamp (for "Last 24 hours")
// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
if (label.includes("T") && label.includes(":")) {
try {
const date = new Date(label);
// Check if date is valid
if (Number.isNaN(date.getTime())) {
return label; // Return original label if date is invalid
}
// Extract hour from full ISO timestamp
const hourNum = date.getHours();
return hourNum === 0
? "12 AM"
: hourNum < 12
? `${hourNum} AM`
: hourNum === 12
? "12 PM"
: `${hourNum - 12} PM`;
} catch (_error) {
return label; // Return original label if parsing fails
}
}
// Format hourly labels (e.g., "2025-10-07T14" -> "2 PM")
if (label.includes("T")) {
if (label.includes("T") && !label.includes(":")) {
try {
const hour = label.split("T")[1];
const hourNum = parseInt(hour, 10);

View File

@@ -281,6 +281,67 @@ const HostDetail = () => {
},
});
// Fetch integration status
const {
data: integrationsData,
isLoading: isLoadingIntegrations,
refetch: refetchIntegrations,
} = useQuery({
queryKey: ["host-integrations", hostId],
queryFn: () =>
adminHostsAPI.getIntegrations(hostId).then((res) => res.data),
staleTime: 30 * 1000, // 30 seconds
refetchOnWindowFocus: false,
enabled: !!hostId && activeTab === "integrations",
});
// Refetch integrations when WebSocket status changes (e.g., after agent restart)
useEffect(() => {
if (
wsStatus?.connected &&
activeTab === "integrations" &&
integrationsData?.data?.connected === false
) {
// Agent just reconnected, refetch integrations to get updated connection status
refetchIntegrations();
}
}, [
wsStatus?.connected,
activeTab,
integrationsData?.data?.connected,
refetchIntegrations,
]);
// Toggle integration mutation
const toggleIntegrationMutation = useMutation({
mutationFn: ({ integrationName, enabled }) =>
adminHostsAPI
.toggleIntegration(hostId, integrationName, enabled)
.then((res) => res.data),
onSuccess: (data) => {
// Optimistically update the cache with the new state
queryClient.setQueryData(["host-integrations", hostId], (oldData) => {
if (!oldData) return oldData;
return {
...oldData,
data: {
...oldData.data,
integrations: {
...oldData.data.integrations,
[data.data.integration]: data.data.enabled,
},
},
};
});
// Also invalidate to ensure we get fresh data
queryClient.invalidateQueries(["host-integrations", hostId]);
},
onError: () => {
// On error, refetch to get the actual state
refetchIntegrations();
},
});
const handleDeleteHost = async () => {
if (
window.confirm(
@@ -666,6 +727,17 @@ const HostDetail = () => {
>
Notes
</button>
<button
type="button"
onClick={() => handleTabChange("integrations")}
className={`px-4 py-2 text-sm font-medium ${
activeTab === "integrations"
? "text-primary-600 dark:text-primary-400 border-b-2 border-primary-500"
: "text-secondary-500 dark:text-secondary-400 hover:text-secondary-700 dark:hover:text-secondary-300"
}`}
>
Integrations
</button>
</div>
<div className="p-4">
@@ -1446,6 +1518,101 @@ const HostDetail = () => {
{/* Agent Queue */}
{activeTab === "queue" && <AgentQueueTab hostId={hostId} />}
{/* Integrations */}
{activeTab === "integrations" && (
<div className="max-w-2xl space-y-4">
{isLoadingIntegrations ? (
<div className="flex items-center justify-center h-32">
<RefreshCw className="h-6 w-6 animate-spin text-primary-600" />
</div>
) : (
<div className="space-y-4">
{/* Docker Integration */}
<div className="bg-secondary-50 dark:bg-secondary-700 rounded-lg p-4 border border-secondary-200 dark:border-secondary-600">
<div className="flex items-start justify-between gap-4">
<div className="flex-1">
<div className="flex items-center gap-3 mb-2">
<Database className="h-5 w-5 text-primary-600 dark:text-primary-400" />
<h4 className="text-sm font-medium text-secondary-900 dark:text-white">
Docker
</h4>
{integrationsData?.data?.integrations?.docker ? (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-200">
Enabled
</span>
) : (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-gray-200 text-gray-600 dark:bg-gray-600 dark:text-gray-400">
Disabled
</span>
)}
</div>
<p className="text-xs text-secondary-600 dark:text-secondary-300">
Monitors Docker containers, images, volumes, and
networks, and collects real-time container status
events.
</p>
</div>
<div className="flex-shrink-0">
<button
type="button"
onClick={() =>
toggleIntegrationMutation.mutate({
integrationName: "docker",
enabled:
!integrationsData?.data?.integrations?.docker,
})
}
disabled={
toggleIntegrationMutation.isPending ||
!wsStatus?.connected
}
title={
!wsStatus?.connected
? "Agent is not connected"
: integrationsData?.data?.integrations?.docker
? "Disable Docker integration"
: "Enable Docker integration"
}
className={`relative inline-flex h-5 w-9 items-center rounded-full transition-colors focus:outline-none focus:ring-2 focus:ring-primary-500 focus:ring-offset-2 ${
integrationsData?.data?.integrations?.docker
? "bg-primary-600 dark:bg-primary-500"
: "bg-secondary-200 dark:bg-secondary-600"
} ${
toggleIntegrationMutation.isPending ||
!wsStatus?.connected
? "opacity-50 cursor-not-allowed"
: ""
}`}
>
<span
className={`inline-block h-3 w-3 transform rounded-full bg-white transition-transform ${
integrationsData?.data?.integrations?.docker
? "translate-x-5"
: "translate-x-1"
}`}
/>
</button>
</div>
</div>
{!wsStatus?.connected && (
<p className="text-xs text-warning-600 dark:text-warning-400 mt-2">
Agent must be connected via WebSocket to toggle
integrations
</p>
)}
{toggleIntegrationMutation.isPending && (
<p className="text-xs text-secondary-600 dark:text-secondary-400 mt-2">
Updating integration...
</p>
)}
</div>
{/* Future integrations can be added here with the same pattern */}
</div>
)}
</div>
)}
</div>
</div>
</div>
@@ -1639,7 +1806,8 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
>
<option value="amd64">AMD64 (x86_64) - Default</option>
<option value="386">386 (i386) - 32-bit</option>
<option value="arm64">ARM64 (aarch64) - ARM</option>
<option value="arm64">ARM64 (aarch64) - ARM 64-bit</option>
<option value="arm">ARM (armv7l/armv6l) - ARM 32-bit</option>
</select>
<p className="text-xs text-primary-600 dark:text-primary-400 mt-1">
Select the architecture of the target host

View File

@@ -237,8 +237,14 @@ const Repositories = () => {
// Handle special cases
if (sortField === "security") {
aValue = a.isSecure ? "Secure" : "Insecure";
bValue = b.isSecure ? "Secure" : "Insecure";
// Use the same logic as filtering to determine isSecure
const aIsSecure =
a.isSecure !== undefined ? a.isSecure : a.url.startsWith("https://");
const bIsSecure =
b.isSecure !== undefined ? b.isSecure : b.url.startsWith("https://");
// Sort by boolean: true (Secure) comes before false (Insecure) when ascending
aValue = aIsSecure ? 1 : 0;
bValue = bIsSecure ? 1 : 0;
} else if (sortField === "status") {
aValue = a.is_active ? "Active" : "Inactive";
bValue = b.is_active ? "Active" : "Inactive";
@@ -535,12 +541,12 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<th
key={column.id}
className="px-4 py-2 text-center text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
className="px-4 py-2 text-left text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
>
<button
type="button"
onClick={() => handleSort(column.id)}
className="flex items-center gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
className="flex items-center justify-start gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
>
{column.label}
{getSortIcon(column.id)}
@@ -559,7 +565,7 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<td
key={column.id}
className="px-4 py-2 whitespace-nowrap text-center"
className="px-4 py-2 whitespace-nowrap text-left"
>
{renderCellContent(column, repo)}
</td>
@@ -622,7 +628,7 @@ const Repositories = () => {
? repo.isSecure
: repo.url.startsWith("https://");
return (
<div className="flex items-center justify-center">
<div className="flex items-center justify-start">
{isSecure ? (
<div className="flex items-center gap-1 text-green-600">
<Lock className="h-4 w-4" />
@@ -651,14 +657,14 @@ const Repositories = () => {
);
case "hostCount":
return (
<div className="flex items-center justify-center gap-1 text-sm text-secondary-900 dark:text-white">
<div className="flex items-center justify-start gap-1 text-sm text-secondary-900 dark:text-white">
<Server className="h-4 w-4" />
<span>{repo.hostCount}</span>
</div>
);
case "actions":
return (
<div className="flex items-center justify-center">
<div className="flex items-center justify-start">
<button
type="button"
onClick={(e) => handleDeleteRepository(repo, e)}

View File

@@ -99,6 +99,8 @@ export const dashboardAPI = {
},
getRecentUsers: () => api.get("/dashboard/recent-users"),
getRecentCollection: () => api.get("/dashboard/recent-collection"),
triggerSystemStatistics: () =>
api.post("/automation/trigger/system-statistics"),
};
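// Usage sketch (illustrative only — the response shape is an assumption):
//   const res = await dashboardAPI.triggerSystemStatistics();
//   // res.data is assumed to carry the queued job's ID (systemStatsJobId)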
// Admin Hosts API (for management interface)
@@ -129,6 +131,11 @@ export const adminHostsAPI = {
api.patch(`/hosts/${hostId}/notes`, {
notes: notes,
}),
getIntegrations: (hostId) => api.get(`/hosts/${hostId}/integrations`),
toggleIntegration: (hostId, integrationName, enabled) =>
api.post(`/hosts/${hostId}/integrations/${integrationName}/toggle`, {
enabled,
}),
};
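// Usage sketch for the new integration endpoints (illustrative only):
//   const { data } = await adminHostsAPI.getIntegrations(hostId);
//   await adminHostsAPI.toggleIntegration(hostId, "docker", true);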
// Host Groups API

package-lock.json generated
View File

@@ -1,19 +1,19 @@
{
"name": "patchmon",
"version": "1.3.1",
"version": "1.3.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "patchmon",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"workspaces": [
"backend",
"frontend"
],
"devDependencies": {
"@biomejs/biome": "^2.3.0",
"@biomejs/biome": "^2.3.4",
"concurrently": "^8.2.2",
"lefthook": "^1.13.4"
},
@@ -23,7 +23,7 @@
},
"backend": {
"name": "patchmon-backend",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"dependencies": {
"@bull-board/api": "^6.13.1",
@@ -59,7 +59,7 @@
},
"frontend": {
"name": "patchmon-frontend",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"dependencies": {
"@dnd-kit/core": "^6.3.1",
@@ -362,7 +362,9 @@
}
},
"node_modules/@biomejs/biome": {
"version": "2.3.0",
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-2.3.4.tgz",
"integrity": "sha512-TU08LXjBHdy0mEY9APtEtZdNQQijXUDSXR7IK1i45wgoPD5R0muK7s61QcFir6FpOj/RP1+YkPx5QJlycXUU3w==",
"dev": true,
"license": "MIT OR Apache-2.0",
"bin": {
@@ -376,18 +378,88 @@
"url": "https://opencollective.com/biome"
},
"optionalDependencies": {
"@biomejs/cli-darwin-arm64": "2.3.0",
"@biomejs/cli-darwin-x64": "2.3.0",
"@biomejs/cli-linux-arm64": "2.3.0",
"@biomejs/cli-linux-arm64-musl": "2.3.0",
"@biomejs/cli-linux-x64": "2.3.0",
"@biomejs/cli-linux-x64-musl": "2.3.0",
"@biomejs/cli-win32-arm64": "2.3.0",
"@biomejs/cli-win32-x64": "2.3.0"
"@biomejs/cli-darwin-arm64": "2.3.4",
"@biomejs/cli-darwin-x64": "2.3.4",
"@biomejs/cli-linux-arm64": "2.3.4",
"@biomejs/cli-linux-arm64-musl": "2.3.4",
"@biomejs/cli-linux-x64": "2.3.4",
"@biomejs/cli-linux-x64-musl": "2.3.4",
"@biomejs/cli-win32-arm64": "2.3.4",
"@biomejs/cli-win32-x64": "2.3.4"
}
},
"node_modules/@biomejs/cli-darwin-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.3.4.tgz",
"integrity": "sha512-w40GvlNzLaqmuWYiDU6Ys9FNhJiclngKqcGld3iJIiy2bpJ0Q+8n3haiaC81uTPY/NA0d8Q/I3Z9+ajc14102Q==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-darwin-x64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.3.4.tgz",
"integrity": "sha512-3s7TLVtjJ7ni1xADXsS7x7GMUrLBZXg8SemXc3T0XLslzvqKj/dq1xGeBQ+pOWQzng9MaozfacIHdK2UlJ3jGA==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.3.4.tgz",
"integrity": "sha512-y7efHyyM2gYmHy/AdWEip+VgTMe9973aP7XYKPzu/j8JxnPHuSUXftzmPhkVw0lfm4ECGbdBdGD6+rLmTgNZaA==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-arm64-musl": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.3.4.tgz",
"integrity": "sha512-IruVGQRwMURivWazchiq7gKAqZSFs5so6gi0hJyxk7x6HR+iwZbO2IxNOqyLURBvL06qkIHs7Wffl6Bw30vCbQ==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-x64": {
"version": "2.3.0",
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-2.3.4.tgz",
"integrity": "sha512-gKfjWR/6/dfIxPJCw8REdEowiXCkIpl9jycpNVHux8aX2yhWPLjydOshkDL6Y/82PcQJHn95VCj7J+BRcE5o1Q==",
"cpu": [
"x64"
],
@@ -401,6 +473,57 @@
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-x64-musl": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.3.4.tgz",
"integrity": "sha512-mzKFFv/w66e4/jCobFmD3kymCqG+FuWE7sVa4Yjqd9v7qt2UhXo67MSZKY9Ih18V2IwPzRKQPCw6KwdZs6AXSA==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-win32-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.3.4.tgz",
"integrity": "sha512-5TJ6JfVez+yyupJ/iGUici2wzKf0RrSAxJhghQXtAEsc67OIpdwSKAQboemILrwKfHDi5s6mu7mX+VTCTUydkw==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-win32-x64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-2.3.4.tgz",
"integrity": "sha512-FGCijXecmC4IedQ0esdYNlMpx0Jxgf4zceCaMu6fkjWyjgn50ZQtMiqZZQ0Q/77yqPxvtkgZAvt5uGw0gAAjig==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@bull-board/api": {
"version": "6.13.1",
"license": "MIT",

View File

@@ -25,7 +25,7 @@
"lint:fix": "biome check --write ."
},
"devDependencies": {
"@biomejs/biome": "^2.3.0",
"@biomejs/biome": "^2.3.4",
"concurrently": "^8.2.2",
"lefthook": "^1.13.4"
},

View File

@@ -66,27 +66,27 @@ SELECTED_SERVICE_NAME=""
# Functions
print_status() {
echo -e "${GREEN}$1${NC}"
printf "${GREEN}%s${NC}\n" "$1"
}
print_info() {
echo -e "${BLUE} $1${NC}"
printf "${BLUE}%s${NC}\n" "$1"
}
print_error() {
echo -e "${RED}$1${NC}"
printf "${RED}%s${NC}\n" "$1"
}
print_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
printf "${YELLOW}%s${NC}\n" "$1"
}
print_question() {
echo -e "${BLUE}$1${NC}"
printf "${BLUE}%s${NC}\n" "$1"
}
print_success() {
echo -e "${GREEN}🎉 $1${NC}"
printf "${GREEN}%s${NC}\n" "$1"
}
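# Note: the helpers above use printf rather than echo -e so that "$1" is
# printed literally and escape handling stays portable across shells.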
# Interactive input functions
@@ -443,7 +443,7 @@ generate_redis_password() {
# Find next available Redis database
find_next_redis_db() {
print_info "Finding next available Redis database..."
print_info "Finding next available Redis database..." >&2
# Start from database 0 and keep checking until we find an empty one
local db_num=0
@@ -463,11 +463,11 @@ find_next_redis_db() {
# Try to load admin credentials if ACL file exists
if [ -f /etc/redis/users.acl ] && grep -q "^user admin" /etc/redis/users.acl; then
# Redis is configured with ACL - try to extract admin password
print_info "Redis requires authentication, attempting with admin credentials..."
print_info "Redis requires authentication, attempting with admin credentials..." >&2
# For multi-instance setups, we can't know the admin password yet
# So we'll just use database 0 as default
print_info "Using database 0 (Redis ACL already configured)"
print_info "Using database 0 (Redis ACL already configured)" >&2
echo "0"
return 0
fi
@@ -484,7 +484,7 @@ find_next_redis_db() {
# Check for authentication errors
if echo "$redis_output" | grep -q "NOAUTH\|WRONGPASS"; then
# If we hit auth errors and haven't configured yet, use database 0
print_info "Redis requires authentication, defaulting to database 0"
print_info "Redis requires authentication, defaulting to database 0" >&2
echo "0"
return 0
fi
@@ -492,10 +492,10 @@ find_next_redis_db() {
# Check for other errors
if echo "$redis_output" | grep -q "ERR"; then
if echo "$redis_output" | grep -q "invalid DB index"; then
print_warning "Reached maximum database limit at database $db_num"
print_warning "Reached maximum database limit at database $db_num" >&2
break
else
print_error "Error checking database $db_num: $redis_output"
print_error "Error checking database $db_num: $redis_output" >&2
return 1
fi
fi
@@ -504,17 +504,17 @@ find_next_redis_db() {
# If database is empty, use it
if [ "$key_count" = "0" ] || [ "$key_count" = "(integer) 0" ]; then
print_status "Found available Redis database: $db_num (empty)"
print_status "Found available Redis database: $db_num (empty)" >&2
echo "$db_num"
return 0
fi
print_info "Database $db_num has $key_count keys, checking next..."
print_info "Database $db_num has $key_count keys, checking next..." >&2
db_num=$((db_num + 1))
done
print_warning "No available Redis databases found (checked 0-$max_attempts)"
print_info "Using database 0 (may have existing data)"
print_warning "No available Redis databases found (checked 0-$max_attempts)" >&2
print_info "Using database 0 (may have existing data)" >&2
echo "0"
return 0
}
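# Illustrative call (variable name is hypothetical):
#   REDIS_DB=$(find_next_redis_db)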
@@ -1658,7 +1658,7 @@ start_services() {
local logs=$(journalctl -u "$SERVICE_NAME" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -1682,18 +1682,18 @@ start_services() {
print_info " cat /etc/redis/users.acl"
echo ""
elif echo "$logs" | grep -q "ECONNREFUSED.*postgresql\|Connection refused.*5432"; then
print_error "Detected PostgreSQL connection error!"
print_error "Detected PostgreSQL connection error!"
print_info "Check if PostgreSQL is running:"
print_info " systemctl status postgresql"
elif echo "$logs" | grep -q "ECONNREFUSED.*redis\|Connection refused.*6379"; then
print_error "Detected Redis connection error!"
print_error "Detected Redis connection error!"
print_info "Check if Redis is running:"
print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "database.*does not exist"; then
print_error "Database does not exist!"
print_error "Database does not exist!"
print_info "Database: $DB_NAME"
elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs"
print_error "Application error detected in logs"
fi
echo ""
@@ -1742,9 +1742,9 @@ async function updateSettings() {
});
}
console.log('Database settings updated successfully');
} catch (error) {
console.error('Error updating settings:', error.message);
process.exit(1);
} finally {
await prisma.\$disconnect();
@@ -1868,7 +1868,7 @@ EOF
if [ -f "$SUMMARY_FILE" ]; then
print_status "Deployment summary appended to: $SUMMARY_FILE"
else
print_error "⚠️ Failed to append to deployment-info.txt file"
print_error "Failed to append to deployment-info.txt file"
return 1
fi
}
@@ -1950,7 +1950,7 @@ EOF
print_status "Deployment information saved to: $INFO_FILE"
print_info "File details: $(ls -lh "$INFO_FILE" | awk '{print $5, $9}')"
else
print_error "⚠️ Failed to create deployment-info.txt file"
print_error "Failed to create deployment-info.txt file"
return 1
fi
}
@@ -2143,7 +2143,7 @@ deploy_instance() {
log_message "Backend port: $BACKEND_PORT"
log_message "SSL enabled: $USE_LETSENCRYPT"
print_status "🎉 PatchMon instance deployed successfully!"
print_status "PatchMon instance deployed successfully!"
echo ""
print_info "Next steps:"
echo " • Visit your URL: $SERVER_PROTOCOL_SEL://$FQDN (ensure DNS is configured)"
@@ -3237,7 +3237,7 @@ update_installation() {
sleep 5
if systemctl is-active --quiet "$service_name"; then
print_success "Update completed successfully!"
print_success "Update completed successfully!"
print_status "Service $service_name is running"
# Get new version
@@ -3265,7 +3265,7 @@ update_installation() {
local logs=$(journalctl -u "$service_name" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -3282,12 +3282,12 @@ update_installation() {
print_info " redis-cli --user $test_user --pass $test_pass -n ${test_db:-0} ping"
echo ""
elif echo "$logs" | grep -q "ECONNREFUSED"; then
print_error "Detected connection refused error!"
print_error "Detected connection refused error!"
print_info "Check if required services are running:"
print_info " systemctl status postgresql"
print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs"
print_error "Application error detected in logs"
fi
echo ""
@@ -3320,7 +3320,7 @@ main() {
# Handle update mode
if [ "$UPDATE_MODE" = "true" ]; then
print_banner
print_info "🔄 PatchMon Update Mode"
print_info "PatchMon Update Mode"
echo ""
# Select installation to update
@@ -3336,7 +3336,7 @@ main() {
# Check if existing installations are present
local existing_installs=($(detect_installations))
if [ ${#existing_installs[@]} -gt 0 ]; then
print_warning "⚠️ Found ${#existing_installs[@]} existing PatchMon installation(s):"
print_warning "Found ${#existing_installs[@]} existing PatchMon installation(s):"
for install in "${existing_installs[@]}"; do
print_info " - $install"
done