Mirror of https://github.com/unraid/api.git (synced 2026-01-02 14:40:01 -06:00)

Compare commits: 4.9.1-buil...feat/flash (73 commits)
Commits in this comparison:

b663293f1b, 4c0b967164, 0d864fa948, 984b8748ef, a406fdc5fe, e7066c0e09,
872559ce56, f99264e73d, 73ba3f074a, e1a7a3d22d, 53b05ebe5e, 2ed1308e40,
6c03df2b97, 074370c42c, f34a33bc9f, c7801a9236, dd759d9f0f, 74da8d81ef,
33e0b1ab24, ca4e2db1f2, ea20d1e211, 79c57b8ed0, 4168f43e3e, 20de3ec8d6,
39b8f453da, 6bf3f77638, 87fc83645f, f126c9568a, c273a3b7e7, 92f3d6956e,
90ed4b9de3, 015c6e527b, 5fcb8da50b, 5b0862dd98, 8da7c6e586, 333093a20d,
69359902cb, 8befa23b4d, f0c26b777f, f29d4f5318, 7f9f4c68ac, cebca3d6bf,
25f57f90aa, 50b80b9c07, 69b8eb9060, d83d36c355, 7c26b01be6, 1d3800c164,
9d4249950d, 5e9d09e75c, 64c71459be, 8f8352090c, 744f34fc7b, 3ffde0272c,
a5c7b9fdd3, db9b8c12b9, 93d9530628, af5ffec13d, 54b0bc0837, 7be58908f0,
00b1c1b0c7, d3adbafbff, dada8e63c5, f5e4607f70, 68139cda2b, bf3b95bfe5,
35a6d14367, 0be56f148d, 4c9e0044e5, 242697c8d8, f93c850b95, 8df0ca58b5,
d31d86dc7d
@@ -9,7 +9,22 @@
       "Bash(pnpm test:*)",
       "Bash(grep:*)",
       "Bash(pnpm type-check:*)",
-      "Bash(pnpm lint:*)"
+      "Bash(pnpm lint:*)",
+      "Bash(pnpm --filter ./api lint)",
+      "Bash(mv:*)",
+      "Bash(ls:*)",
+      "mcp__ide__getDiagnostics",
+      "Bash(pnpm --filter \"*connect*\" test connect-status-writer.service.spec)",
+      "Bash(pnpm add:*)",
+      "Bash(npx tsc:*)",
+      "Bash(pnpm list:*)",
+      "Bash(rm:*)",
+      "Bash(pnpm --filter ./api test)",
+      "Bash(pnpm i:*)",
+      "Bash(pnpm:*)",
+      "Bash(corepack prepare:*)",
+      "Bash(nvm:*)",
+      "Bash(git config:*)"
     ]
   },
   "enableAllProjectMcpServers": false
.github/workflows/deploy-storybook.yml (vendored, 4 changes)

@@ -25,7 +25,7 @@ jobs:
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
-          node-version: '20'
+          node-version: '22'

       - uses: pnpm/action-setup@v4
         name: Install pnpm
@@ -33,7 +33,7 @@ jobs:
           run_install: false

       - name: Cache APT Packages
-        uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+        uses: awalsh128/cache-apt-pkgs-action@v1.5.1
         with:
           packages: bash procps python3 libvirt-dev jq zstd git build-essential libvirt-daemon-system
           version: 1.0
.github/workflows/main.yml (vendored, 6 changes)

@@ -45,7 +45,7 @@ jobs:
           node-version-file: ".nvmrc"

       - name: Cache APT Packages
-        uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+        uses: awalsh128/cache-apt-pkgs-action@v1.5.1
         with:
           packages: bash procps python3 libvirt-dev jq zstd git build-essential libvirt-daemon-system
           version: 1.0
@@ -190,7 +190,7 @@ jobs:
           ${{ runner.os }}-pnpm-store-

       - name: Cache APT Packages
-        uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+        uses: awalsh128/cache-apt-pkgs-action@v1.5.1
         with:
           packages: bash procps python3 libvirt-dev jq zstd git build-essential
           version: 1.0
@@ -267,7 +267,7 @@ jobs:
           ${{ runner.os }}-pnpm-store-

       - name: Cache APT Packages
-        uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+        uses: awalsh128/cache-apt-pkgs-action@v1.5.1
         with:
           packages: bash procps python3 libvirt-dev jq zstd git build-essential
           version: 1.0
.github/workflows/test-libvirt.yml (vendored, 2 changes)

@@ -31,7 +31,7 @@ jobs:
           python-version: "3.13.5"

       - name: Cache APT Packages
-        uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+        uses: awalsh128/cache-apt-pkgs-action@v1.5.1
         with:
           packages: libvirt-dev
           version: 1.0
@@ -1 +1 @@
-{".":"4.9.1"}
+{".":"4.9.5"}
@@ -15,6 +15,7 @@ PATHS_ACTIVATION_BASE=./dev/activation
 PATHS_PASSWD=./dev/passwd
 PATHS_RCLONE_SOCKET=./dev/rclone-socket
 PATHS_LOG_BASE=./dev/log # Where we store logs
+PATHS_BACKUP_JOBS=./dev/api/backup
 ENVIRONMENT="development"
 NODE_ENV="development"
 PORT="3001"
@@ -26,4 +27,4 @@ BYPASS_PERMISSION_CHECKS=false
 BYPASS_CORS_CHECKS=true
 CHOKIDAR_USEPOLLING=true
 LOG_TRANSPORT=console
-LOG_LEVEL=trace
+LOG_LEVEL=debug # Change to trace for extremely noisy logging
@@ -53,5 +53,5 @@ export default tseslint.config(eslint.configs.recommended, ...tseslint.configs.r
         'eol-last': ['error', 'always'],
     },

-    ignores: ['src/graphql/generated/client/**/*'],
+    ignores: ['src/graphql/generated/client/**/*', 'scripts/**/*'],
 });
@@ -1,5 +1,37 @@
 # Changelog

+## [4.9.5](https://github.com/unraid/api/compare/v4.9.4...v4.9.5) (2025-07-10)
+
+### Bug Fixes
+
+* **connect:** rm eager restart on `ERROR_RETYING` connection status ([#1502](https://github.com/unraid/api/issues/1502)) ([dd759d9](https://github.com/unraid/api/commit/dd759d9f0f841b296f8083bc67c6cd3f7a69aa5b))
+
+## [4.9.4](https://github.com/unraid/api/compare/v4.9.3...v4.9.4) (2025-07-09)
+
+### Bug Fixes
+
+* backport `<unraid-modals>` upon plg install when necessary ([#1499](https://github.com/unraid/api/issues/1499)) ([33e0b1a](https://github.com/unraid/api/commit/33e0b1ab24bedb6a2c7b376ea73dbe65bc3044be))
+* DefaultPageLayout patch rollback omits legacy header logo ([#1497](https://github.com/unraid/api/issues/1497)) ([ea20d1e](https://github.com/unraid/api/commit/ea20d1e2116fcafa154090fee78b42ec5d9ba584))
+* event emitter setup for writing status ([#1496](https://github.com/unraid/api/issues/1496)) ([ca4e2db](https://github.com/unraid/api/commit/ca4e2db1f29126a1fa3784af563832edda64b0ca))
+
+## [4.9.3](https://github.com/unraid/api/compare/v4.9.2...v4.9.3) (2025-07-09)
+
+### Bug Fixes
+
+* duplicated header logo after api stops ([#1493](https://github.com/unraid/api/issues/1493)) ([4168f43](https://github.com/unraid/api/commit/4168f43e3ecd51479bec3aae585abbe6dcd3e416))
+
+## [4.9.2](https://github.com/unraid/api/compare/v4.9.1...v4.9.2) (2025-07-09)
+
+### Bug Fixes
+
+* invalid configs no longer crash API ([#1491](https://github.com/unraid/api/issues/1491)) ([6bf3f77](https://github.com/unraid/api/commit/6bf3f776380edeff5133517e6aca223556e30144))
+* invalid state for unraid plugin ([#1492](https://github.com/unraid/api/issues/1492)) ([39b8f45](https://github.com/unraid/api/commit/39b8f453da23793ef51f8e7f7196370aada8c5aa))
+* release note escaping ([5b6bcb6](https://github.com/unraid/api/commit/5b6bcb6043a5269bff4dc28714d787a5a3f07e22))
+
+## [4.9.1](https://github.com/unraid/api/compare/v4.9.0...v4.9.1) (2025-07-08)
api/dev/api/backup/backup-jobs.json (new file, 25 lines)

@@ -0,0 +1,25 @@
[
  {
    "id": "a68667b6-f4ef-4c47-aec3-d9886be78487",
    "name": "Test",
    "sourceType": "RAW",
    "destinationType": "RCLONE",
    "schedule": "0 2 * * *",
    "enabled": true,
    "sourceConfig": {
      "label": "Raw file backup",
      "sourcePath": "/Users/elibosley/Desktop",
      "excludePatterns": [],
      "includePatterns": []
    },
    "destinationConfig": {
      "type": "RCLONE",
      "remoteName": "google_drives",
      "destinationPath": "desktop"
    },
    "createdAt": "2025-05-27T15:02:31.655Z",
    "updatedAt": "2025-05-27T15:11:40.547Z",
    "lastRunAt": "2025-05-27T15:07:37.139Z",
    "lastRunStatus": "Failed: RClone group backup-job_1748358397105_sbo5j322k failed or timed out."
  }
]
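For reference, each entry in this dev fixture maps naturally onto a TypeScript shape like the following sketch. Field names are taken from the JSON above and the source/destination enums added to the schema later in this diff; the interface name `BackupJobRecord` is illustrative, not a name from the repository.

```ts
// Illustrative shape of one entry in backup-jobs.json; the name
// BackupJobRecord is hypothetical and not taken from the repository.
interface BackupJobRecord {
    id: string;                    // UUID of the job config
    name: string;
    sourceType: 'ZFS' | 'FLASH' | 'SCRIPT' | 'RAW';
    destinationType: 'RCLONE';
    schedule: string;              // cron expression, e.g. "0 2 * * *"
    enabled: boolean;
    sourceConfig: {
        label: string;
        sourcePath: string;
        excludePatterns: string[];
        includePatterns: string[];
    };
    destinationConfig: {
        type: 'RCLONE';
        remoteName: string;        // remote name from rclone config
        destinationPath: string;
    };
    createdAt: string;             // ISO 8601 timestamps
    updatedAt: string;
    lastRunAt?: string;
    lastRunStatus?: string;
}
```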
@@ -1,10 +1,12 @@
 {
-  "version": "4.8.0",
+  "version": "4.9.5",
   "extraOrigins": [
     "https://google.com",
     "https://test.com"
   ],
   "sandbox": true,
   "ssoSubIds": [],
-  "plugins": ["unraid-api-plugin-connect"]
+  "plugins": [
+    "unraid-api-plugin-connect"
+  ]
 }
@@ -1,5 +1,5 @@
 [api]
-version="4.4.1"
+version="4.8.0"
 extraOrigins="https://google.com,https://test.com"
 [local]
 sandbox="yes"
@@ -598,6 +598,7 @@ enum Resource {
   ACTIVATION_CODE
   API_KEY
   ARRAY
+  BACKUP
   CLOUD
   CONFIG
   CONNECT
@@ -653,6 +654,63 @@ type ApiKeyWithSecret implements Node {
   key: String!
 }

+type JobStatus implements Node {
+  id: PrefixedID!
+
+  """External job ID from the job execution system"""
+  externalJobId: String!
+  name: String!
+  status: BackupJobStatus!
+
+  """Progress percentage (0-100)"""
+  progress: Int!
+  message: String
+  error: String
+  startTime: DateTime!
+  endTime: DateTime
+
+  """Bytes transferred"""
+  bytesTransferred: Int
+
+  """Total bytes to transfer"""
+  totalBytes: Int
+
+  """Transfer speed in bytes per second"""
+  speed: Int
+
+  """Elapsed time in seconds"""
+  elapsedTime: Int
+
+  """Estimated time to completion in seconds"""
+  eta: Int
+
+  """Human-readable bytes transferred"""
+  formattedBytesTransferred: String
+
+  """Human-readable transfer speed"""
+  formattedSpeed: String
+
+  """Human-readable elapsed time"""
+  formattedElapsedTime: String
+
+  """Human-readable ETA"""
+  formattedEta: String
+}
+
+"""Status of a backup job"""
+enum BackupJobStatus {
+  QUEUED
+  RUNNING
+  COMPLETED
+  FAILED
+  CANCELLED
+}
+
+"""
+A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format.
+"""
+scalar DateTime
+
 type RCloneDrive {
   """Provider name"""
   name: String!
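A JobStatus is a point-in-time snapshot, so clients typically poll it until a terminal BackupJobStatus is reached. A minimal polling sketch, assuming a generic GraphQL client (graphql-request is used purely for illustration; the endpoint and interval are placeholders, and the `backupJob` query is the one added to Query later in this diff):

```ts
import { request, gql } from 'graphql-request';

// Illustrative poller: fetch a job's status every two seconds until it
// reaches a terminal state. Endpoint and interval are placeholders.
const JOB_QUERY = gql`
    query Job($id: PrefixedID!) {
        backupJob(id: $id) {
            id
            status
            progress
            formattedSpeed
            formattedEta
            error
        }
    }
`;

const TERMINAL = new Set(['COMPLETED', 'FAILED', 'CANCELLED']);

async function waitForJob(endpoint: string, id: string) {
    for (;;) {
        const { backupJob } = await request<{ backupJob: any }>(endpoint, JOB_QUERY, { id });
        // A null result or a terminal status ends the loop
        if (!backupJob || TERMINAL.has(backupJob.status)) return backupJob;
        await new Promise((resolve) => setTimeout(resolve, 2000));
    }
}
```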
@@ -693,6 +751,98 @@ type RCloneRemote {
   config: JSON!
 }

+type RCloneJobStats {
+  """Bytes transferred"""
+  bytes: Float
+
+  """Transfer speed in bytes/sec"""
+  speed: Float
+
+  """Estimated time to completion in seconds"""
+  eta: Float
+
+  """Elapsed time in seconds"""
+  elapsedTime: Float
+
+  """Progress percentage (0-100)"""
+  percentage: Float
+
+  """Number of checks completed"""
+  checks: Float
+
+  """Number of deletes completed"""
+  deletes: Float
+
+  """Number of errors encountered"""
+  errors: Float
+
+  """Whether a fatal error occurred"""
+  fatalError: Boolean
+
+  """Last error message"""
+  lastError: String
+
+  """Number of renames completed"""
+  renames: Float
+
+  """Whether there is a retry error"""
+  retryError: Boolean
+
+  """Number of server-side copies"""
+  serverSideCopies: Float
+
+  """Bytes in server-side copies"""
+  serverSideCopyBytes: Float
+
+  """Number of server-side moves"""
+  serverSideMoves: Float
+
+  """Bytes in server-side moves"""
+  serverSideMoveBytes: Float
+
+  """Total bytes to transfer"""
+  totalBytes: Float
+
+  """Total checks to perform"""
+  totalChecks: Float
+
+  """Total transfers to perform"""
+  totalTransfers: Float
+
+  """Time spent transferring in seconds"""
+  transferTime: Float
+
+  """Number of transfers completed"""
+  transfers: Float
+
+  """Currently transferring files"""
+  transferring: JSON
+
+  """Currently checking files"""
+  checking: JSON
+
+  """Human-readable bytes transferred"""
+  formattedBytes: String
+
+  """Human-readable transfer speed"""
+  formattedSpeed: String
+
+  """Human-readable elapsed time"""
+  formattedElapsedTime: String
+
+  """Human-readable ETA"""
+  formattedEta: String
+
+  """Calculated percentage (fallback when percentage is null)"""
+  calculatedPercentage: Float
+
+  """Whether the job is actively running"""
+  isActivelyRunning: Boolean
+
+  """Whether the job is completed"""
+  isCompleted: Boolean
+}
+
 type ArrayMutations {
   """Set array state"""
   setState(input: ArrayStateInput!): UnraidArray!
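The RCloneJobStats fields mirror the counters rclone reports from its remote-control `core/stats` endpoint, which the API reaches over a unix socket. As a rough sketch of where those numbers come from (the socket base URL matches the one used in the service tests later in this diff; this is not a verbatim service excerpt):

```ts
import got from 'got';

// Sketch only: fetch raw transfer counters from rclone's RC API over its
// unix socket. The response fields line up with the nullable Float fields
// of RCloneJobStats (bytes, speed, eta, elapsedTime, errors, ...).
const RCLONE_BASE_URL = 'http://unix:/tmp/rclone.sock:';

async function fetchCoreStats(): Promise<Record<string, unknown>> {
    const response = await got.post(`${RCLONE_BASE_URL}/core/stats`, {
        json: {},
        responseType: 'json',
        enableUnixSockets: true, // required for unix-socket URLs on newer got versions
    });
    // e.g. { bytes, speed, eta, elapsedTime, errors, fatalError, transferring, ... }
    return response.body as Record<string, unknown>;
}
```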
@@ -764,7 +914,186 @@ type VmMutations {
   reset(id: PrefixedID!): Boolean!
 }

+"""Backup related mutations"""
+type BackupMutations {
+  """Create a new backup job configuration"""
+  createBackupJobConfig(input: CreateBackupJobConfigInput!): BackupJobConfig!
+
+  """Update a backup job configuration"""
+  updateBackupJobConfig(id: PrefixedID!, input: UpdateBackupJobConfigInput!): BackupJobConfig
+
+  """Delete a backup job configuration"""
+  deleteBackupJobConfig(id: PrefixedID!): Boolean!
+
+  """Initiates a backup using a configured remote."""
+  initiateBackup(input: InitiateBackupInput!): BackupStatus!
+
+  """Toggle a backup job configuration enabled/disabled"""
+  toggleJobConfig(id: PrefixedID!): BackupJobConfig
+
+  """Manually trigger a backup job using existing configuration"""
+  triggerJob(id: PrefixedID!): BackupStatus!
+
+  """Stop all running backup jobs"""
+  stopAllBackupJobs: BackupStatus!
+
+  """Stop a specific backup job"""
+  stopBackupJob(id: PrefixedID!): BackupStatus!
+
+  """Forget all finished backup jobs to clean up the job list"""
+  forgetFinishedBackupJobs: BackupStatus!
+}
+
+input CreateBackupJobConfigInput {
+  name: String!
+  schedule: String
+  enabled: Boolean! = true
+
+  """Source configuration for this backup job"""
+  sourceConfig: SourceConfigInput
+
+  """Destination configuration for this backup job"""
+  destinationConfig: DestinationConfigInput
+}
+
+input SourceConfigInput {
+  type: SourceType!
+
+  """Timeout for backup operation in seconds"""
+  timeout: Float! = 3600
+
+  """Whether to cleanup on failure"""
+  cleanupOnFailure: Boolean! = true
+  zfsConfig: ZfsPreprocessConfigInput
+  flashConfig: FlashPreprocessConfigInput
+  scriptConfig: ScriptPreprocessConfigInput
+  rawConfig: RawBackupConfigInput
+}
+
+"""
+Type of backup to perform (ZFS snapshot, Flash backup, Custom script, or Raw file backup)
+"""
+enum SourceType {
+  ZFS
+  FLASH
+  SCRIPT
+  RAW
+}
+
+input ZfsPreprocessConfigInput {
+  """Human-readable label for this source configuration"""
+  label: String
+
+  """ZFS pool name"""
+  poolName: String!
+
+  """Dataset name within the pool"""
+  datasetName: String!
+
+  """Snapshot name prefix"""
+  snapshotPrefix: String
+
+  """Whether to cleanup snapshots after backup"""
+  cleanupSnapshots: Boolean! = true
+
+  """Number of snapshots to retain"""
+  retainSnapshots: Float
+}
+
+input FlashPreprocessConfigInput {
+  """Human-readable label for this source configuration"""
+  label: String
+
+  """Flash drive mount path"""
+  flashPath: String! = "/boot"
+
+  """Whether to include git history"""
+  includeGitHistory: Boolean! = true
+
+  """Additional paths to include in backup"""
+  additionalPaths: [String!]
+}
+
+input ScriptPreprocessConfigInput {
+  """Human-readable label for this source configuration"""
+  label: String
+
+  """Path to the script file"""
+  scriptPath: String!
+
+  """Arguments to pass to the script"""
+  scriptArgs: [String!]
+
+  """Working directory for script execution"""
+  workingDirectory: String
+
+  """Environment variables for script execution"""
+  environment: JSON
+
+  """Output file path where script should write data"""
+  outputPath: String!
+}
+
+input RawBackupConfigInput {
+  """Human-readable label for this source configuration"""
+  label: String
+
+  """Source path to backup"""
+  sourcePath: String!
+
+  """File patterns to exclude from backup"""
+  excludePatterns: [String!]
+
+  """File patterns to include in backup"""
+  includePatterns: [String!]
+}
+
+input DestinationConfigInput {
+  type: DestinationType!
+  rcloneConfig: RcloneDestinationConfigInput
+}
+
+enum DestinationType {
+  RCLONE
+}
+
+input RcloneDestinationConfigInput {
+  remoteName: String!
+  destinationPath: String!
+  rcloneOptions: JSON
+}
+
+input UpdateBackupJobConfigInput {
+  name: String
+  schedule: String
+  enabled: Boolean
+
+  """Source configuration for this backup job"""
+  sourceConfig: SourceConfigInput
+
+  """Destination configuration for this backup job"""
+  destinationConfig: DestinationConfigInput
+  lastRunStatus: String
+  lastRunAt: String
+  currentJobId: String
+}
+
+input InitiateBackupInput {
+  """The name of the remote configuration to use for the backup."""
+  remoteName: String!
+
+  """Source path to backup."""
+  sourcePath: String!
+
+  """Destination path on the remote."""
+  destinationPath: String!
+
+  """
+  Additional options for the backup operation, such as --dry-run or --transfers.
+  """
+  options: JSON
+}
+
 """API Key related mutations"""
 type ApiKeyMutations {
   """Create an API key"""
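Taken together, these mutations give clients full lifecycle control over backup job configs. A minimal client-side sketch, assuming a generic GraphQL client (graphql-request is used purely for illustration, and the endpoint URL is a placeholder; the repo's own clients are generated via graphql-codegen):

```ts
import { request, gql } from 'graphql-request';

// Illustrative: create a daily RAW backup job config via the schema above.
// The backup namespace on Mutation is added later in this diff.
const CREATE_JOB = gql`
    mutation CreateJob($input: CreateBackupJobConfigInput!) {
        backup {
            createBackupJobConfig(input: $input) {
                id
                name
                enabled
                schedule
            }
        }
    }
`;

const input = {
    name: 'Nightly desktop backup',
    schedule: '0 2 * * *', // daily at 2AM, matching the schema's own example
    enabled: true,
    sourceConfig: {
        type: 'RAW',
        rawConfig: { sourcePath: '/mnt/user/documents' }, // illustrative path
    },
    destinationConfig: {
        type: 'RCLONE',
        rcloneConfig: { remoteName: 'google_drives', destinationPath: 'documents' },
    },
};

await request('http://localhost:3001/graphql', CREATE_JOB, { input });
```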
@@ -886,10 +1216,125 @@ type ParityCheck {
   running: Boolean
 }

+type FlashPreprocessConfig {
+  label: String!
+  flashPath: String!
+  includeGitHistory: Boolean!
+  additionalPaths: [String!]
+}
+
+type RawBackupConfig {
+  label: String!
+  sourcePath: String!
+  excludePatterns: [String!]
+  includePatterns: [String!]
+}
+
+type ScriptPreprocessConfig {
+  label: String!
+  scriptPath: String!
+  scriptArgs: [String!]
+  workingDirectory: String
+  environment: JSON
+  outputPath: String!
+}
+
+type ZfsPreprocessConfig {
+  label: String!
+  poolName: String!
+  datasetName: String!
+  snapshotPrefix: String
+  cleanupSnapshots: Boolean!
+  retainSnapshots: Float
+}
+
+type Backup implements Node {
+  id: PrefixedID!
+  jobs: [JobStatus!]!
+  configs: [BackupJobConfig!]!
+
+  """Get the status for the backup service"""
+  status: BackupStatus!
+}
+
+type BackupStatus {
+  """Status message indicating the outcome of the backup initiation."""
+  status: String!
+
+  """Job ID if available, can be used to check job status."""
+  jobId: String
+}
+
+type BackupJobConfig implements Node {
+  id: PrefixedID!
+
+  """Human-readable name for this backup job"""
+  name: String!
+
+  """Type of the backup source"""
+  sourceType: SourceType!
+
+  """Type of the backup destination"""
+  destinationType: DestinationType!
+
+  """Cron schedule expression (e.g., "0 2 * * *" for daily at 2AM)"""
+  schedule: String!
+
+  """Whether this backup job is enabled"""
+  enabled: Boolean!
+
+  """Source configuration for this backup job"""
+  sourceConfig: SourceConfigUnion!
+
+  """Destination configuration for this backup job"""
+  destinationConfig: DestinationConfigUnion!
+
+  """When this config was created"""
+  createdAt: DateTimeISO!
+
+  """When this config was last updated"""
+  updatedAt: DateTimeISO!
+
+  """Last time this job ran"""
+  lastRunAt: DateTimeISO
+
+  """Status of last run"""
+  lastRunStatus: String
+
+  """Current running job ID for this config"""
+  currentJobId: String
+
+  """Get the current running job for this backup config"""
+  currentJob: JobStatus
+}
+
+union SourceConfigUnion = ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig
+
+union DestinationConfigUnion = RcloneDestinationConfig
+
+type RcloneDestinationConfig {
+  type: String!
+
+  """Remote name from rclone config"""
+  remoteName: String!
+
+  """Destination path on the remote"""
+  destinationPath: String!
+
+  """RClone options (e.g., --transfers, --checkers)"""
+  rcloneOptions: JSON
+}
+
 """
-A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format.
+A date-time string at UTC, such as 2007-12-03T10:15:30Z, compliant with the `date-time` format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard for representation of dates and times using the Gregorian calendar.This scalar is serialized to a string in ISO 8601 format and parsed from a string in ISO 8601 format.
 """
-scalar DateTime
+scalar DateTimeISO
+
+type BackupJobConfigForm {
+  id: PrefixedID!
+  dataSchema: JSON!
+  uiSchema: JSON!
+}
+
 type Config implements Node {
   id: PrefixedID!
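Because sourceConfig is exposed as a union, clients select the concrete config with inline fragments. A hedged sketch of such a query, written as a TypeScript gql document (the field selection is illustrative, not a query from the repo):

```ts
import { gql } from 'graphql-request';

// Illustrative query showing how a client discriminates SourceConfigUnion
// with inline fragments; only a subset of fields is selected.
const BACKUP_CONFIGS = gql`
    query BackupConfigs {
        backup {
            configs {
                id
                name
                sourceType
                sourceConfig {
                    ... on RawBackupConfig {
                        sourcePath
                        excludePatterns
                    }
                    ... on ZfsPreprocessConfig {
                        poolName
                        datasetName
                    }
                    ... on FlashPreprocessConfig {
                        flashPath
                        includeGitHistory
                    }
                    ... on ScriptPreprocessConfig {
                        scriptPath
                        outputPath
                    }
                }
            }
        }
    }
`;
```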
@@ -1248,14 +1693,6 @@ type Docker implements Node {
   networks(skipCache: Boolean! = false): [DockerNetwork!]!
 }

-type FlashBackupStatus {
-  """Status message indicating the outcome of the backup initiation."""
-  status: String!
-
-  """Job ID if available, can be used to check job status."""
-  jobId: String
-}
-
 type Flash implements Node {
   id: PrefixedID!
   guid: String!
@@ -1658,13 +2095,27 @@ type Query {
   vms: Vms!
   parityHistory: [ParityCheck!]!
   array: UnraidArray!
+
+  """Get backup service information"""
+  backup: Backup!
+
+  """Get a specific backup job configuration"""
+  backupJobConfig(id: PrefixedID!): BackupJobConfig
+
+  """Get status of a specific backup job"""
+  backupJob(id: PrefixedID!): JobStatus
+
+  """Get the JSON schema for backup job configuration form"""
+  backupJobConfigForm(input: BackupJobConfigFormInput): BackupJobConfigForm!
+  backupJobStatus(jobId: PrefixedID!): JobStatus
+  allBackupJobStatuses: [JobStatus!]!
+  rclone: RCloneBackupSettings!
   customization: Customization
   publicPartnerInfo: PublicPartnerInfo
   publicTheme: Theme!
   docker: Docker!
   disks: [Disk!]!
   disk(id: PrefixedID!): Disk!
-  rclone: RCloneBackupSettings!
   settings: Settings!
   isSSOEnabled: Boolean!

@@ -1676,6 +2127,10 @@ type Query {
   cloud: Cloud!
 }

+input BackupJobConfigFormInput {
+  showAdvanced: Boolean! = false
+}
+
 type Mutation {
   """Creates a new notification record"""
   createNotification(input: NotificationData!): Notification!
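The backupJobConfigForm query returns a JSON-schema pair rather than typed fields: dataSchema validates the config values and uiSchema drives the form layout, so the UI can render the backup config form dynamically. A hedged sketch of fetching it (the meaning of showAdvanced is inferred from its name, not documented in the schema):

```ts
import { gql } from 'graphql-request';

// Illustrative: fetch the schema pair that drives the backup config form.
const CONFIG_FORM = gql`
    query ConfigForm($input: BackupJobConfigFormInput) {
        backupJobConfigForm(input: $input) {
            id
            dataSchema
            uiSchema
        }
    }
`;

// showAdvanced defaults to false; passing true presumably exposes the
// advanced fields (assumption based on the field name).
const variables = { input: { showAdvanced: true } };
```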
@@ -1699,12 +2154,10 @@ type Mutation {
   array: ArrayMutations!
   docker: DockerMutations!
   vm: VmMutations!
+  backup: BackupMutations!
   parityCheck: ParityCheckMutations!
   apiKey: ApiKeyMutations!
   rclone: RCloneMutations!
-
-  """Initiates a flash drive backup using a configured remote."""
-  initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus!
   updateSettings(input: JSON!): UpdateSettingsResponse!

   """
@@ -1731,22 +2184,6 @@ input NotificationData {
   link: String
 }

-input InitiateFlashBackupInput {
-  """The name of the remote configuration to use for the backup."""
-  remoteName: String!
-
-  """Source path to backup (typically the flash drive)."""
-  sourcePath: String!
-
-  """Destination path on the remote."""
-  destinationPath: String!
-
-  """
-  Additional options for the backup operation, such as --dry-run or --transfers.
-  """
-  options: JSON
-}
-
 input PluginManagementInput {
   """Array of plugin package names to add or remove"""
   names: [String!]!
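The removed flash-specific mutation is superseded by the generic backup.initiateBackup, whose InitiateBackupInput carries the same remoteName/sourcePath/destinationPath/options fields. A hedged sketch of the replacement call (endpoint and option values are placeholders):

```ts
import { request, gql } from 'graphql-request';

// Illustrative replacement for the removed initiateFlashBackup mutation:
// the same fields now go through the generic backup namespace.
const INITIATE_BACKUP = gql`
    mutation Initiate($input: InitiateBackupInput!) {
        backup {
            initiateBackup(input: $input) {
                status
                jobId
            }
        }
    }
`;

await request('http://localhost:3001/graphql', INITIATE_BACKUP, {
    input: {
        remoteName: 'google_drives',
        sourcePath: '/boot',             // the flash drive, per the old input's docstring
        destinationPath: 'flash-backup', // illustrative remote path
        options: { 'dry-run': true },    // illustrative rclone option
    },
});
```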
@@ -1,6 +1,6 @@
 {
   "name": "@unraid/api",
-  "version": "4.9.1",
+  "version": "4.9.5",
   "main": "src/cli/index.ts",
   "type": "module",
   "corepack": {
@@ -10,7 +10,7 @@
   "author": "Lime Technology, Inc. <unraid.net>",
   "license": "GPL-2.0-or-later",
   "engines": {
-    "pnpm": "10.12.4"
+    "pnpm": "10.13.1"
   },
   "scripts": {
     "// Development": "",
@@ -57,7 +57,7 @@
     "@as-integrations/fastify": "2.1.1",
     "@fastify/cookie": "11.0.2",
     "@fastify/helmet": "13.0.1",
-    "@graphql-codegen/client-preset": "4.8.2",
+    "@graphql-codegen/client-preset": "4.8.3",
     "@graphql-tools/load-files": "7.0.1",
     "@graphql-tools/merge": "9.0.24",
     "@graphql-tools/schema": "10.0.23",
@@ -82,7 +82,7 @@
     "accesscontrol": "2.2.1",
     "bycontract": "2.0.11",
     "bytes": "3.1.2",
-    "cache-manager": "7.0.0",
+    "cache-manager": "7.0.1",
     "cacheable-lookup": "7.0.0",
     "camelcase-keys": "9.1.3",
     "casbin": "5.38.0",
@@ -94,11 +94,11 @@
     "command-exists": "1.2.9",
     "convert": "5.12.0",
     "cookie": "1.0.2",
-    "cron": "4.3.1",
+    "cron": "4.3.0",
     "cross-fetch": "4.1.0",
     "diff": "8.0.2",
     "dockerode": "4.0.7",
-    "dotenv": "17.1.0",
+    "dotenv": "17.2.0",
     "execa": "9.6.0",
     "exit-hook": "4.0.0",
     "fastify": "5.4.0",
@@ -138,11 +138,11 @@
     "rxjs": "7.8.2",
     "semver": "7.7.2",
     "strftime": "0.10.3",
-    "systeminformation": "5.27.6",
+    "systeminformation": "5.27.7",
     "uuid": "11.1.0",
-    "ws": "8.18.2",
+    "ws": "8.18.3",
     "zen-observable-ts": "1.1.0",
-    "zod": "3.25.67"
+    "zod": "3.25.76"
   },
   "peerDependencies": {
     "unraid-api-plugin-connect": "workspace:*"
@@ -153,35 +153,35 @@
     }
   },
   "devDependencies": {
-    "@eslint/js": "9.29.0",
+    "@eslint/js": "9.30.1",
     "@graphql-codegen/add": "5.0.3",
     "@graphql-codegen/cli": "5.0.7",
     "@graphql-codegen/fragment-matcher": "5.1.0",
     "@graphql-codegen/import-types-preset": "3.0.1",
-    "@graphql-codegen/typed-document-node": "5.1.1",
+    "@graphql-codegen/typed-document-node": "5.1.2",
     "@graphql-codegen/typescript": "4.1.6",
     "@graphql-codegen/typescript-operations": "4.6.1",
     "@graphql-codegen/typescript-resolvers": "4.5.1",
     "@graphql-typed-document-node/core": "3.2.0",
-    "@ianvs/prettier-plugin-sort-imports": "4.4.2",
+    "@ianvs/prettier-plugin-sort-imports": "4.5.1",
     "@nestjs/testing": "11.1.3",
     "@originjs/vite-plugin-commonjs": "1.0.3",
     "@rollup/plugin-node-resolve": "16.0.1",
-    "@swc/core": "1.12.4",
+    "@swc/core": "1.12.11",
     "@types/async-exit-hook": "2.0.2",
     "@types/bytes": "3.1.5",
     "@types/cli-table": "0.3.4",
     "@types/command-exists": "1.2.3",
     "@types/cors": "2.8.19",
-    "@types/dockerode": "3.3.41",
+    "@types/dockerode": "3.3.42",
     "@types/graphql-fields": "1.3.9",
     "@types/graphql-type-uuid": "0.2.6",
     "@types/ini": "4.1.1",
     "@types/ip": "1.1.3",
-    "@types/lodash": "4.17.18",
+    "@types/lodash": "4.17.20",
     "@types/lodash-es": "4.17.12",
     "@types/mustache": "4.2.6",
-    "@types/node": "22.15.32",
+    "@types/node": "22.16.3",
     "@types/pify": "6.1.0",
     "@types/semver": "7.7.0",
     "@types/sendmail": "1.4.7",
@@ -192,28 +192,28 @@
     "@types/wtfnode": "0.7.3",
     "@vitest/coverage-v8": "3.2.4",
     "@vitest/ui": "3.2.4",
-    "commit-and-tag-version": "9.6.0",
     "cz-conventional-changelog": "3.3.0",
-    "eslint": "9.29.0",
-    "eslint-plugin-import": "2.31.0",
-    "eslint-plugin-n": "17.20.0",
+    "eslint": "9.30.1",
+    "eslint-plugin-import": "2.32.0",
+    "eslint-plugin-n": "17.21.0",
     "eslint-plugin-no-relative-import-paths": "1.6.1",
-    "eslint-plugin-prettier": "5.5.0",
+    "eslint-plugin-prettier": "5.5.1",
     "graphql-codegen-typescript-validation-schema": "0.17.1",
     "jiti": "2.4.2",
     "nodemon": "3.1.10",
-    "prettier": "3.5.3",
+    "prettier": "3.6.2",
     "rollup-plugin-node-externals": "8.0.1",
+    "commit-and-tag-version": "9.5.0",
     "tsx": "4.20.3",
     "type-fest": "4.41.0",
     "typescript": "5.8.3",
-    "typescript-eslint": "8.34.1",
+    "typescript-eslint": "8.36.0",
     "unplugin-swc": "1.5.5",
-    "vite": "7.0.3",
+    "vite": "7.0.4",
     "vite-plugin-node": "7.0.0",
     "vite-tsconfig-paths": "5.1.4",
     "vitest": "3.2.4",
-    "zx": "8.5.5"
+    "zx": "8.6.2"
   },
   "overrides": {
     "eslint": {
@@ -225,8 +225,9 @@
     "nest-authz": {
       "@nestjs/common": "$@nestjs/common",
       "@nestjs/core": "$@nestjs/core"
-    }
+    },
+    "cron": "4.3.1"
   },
   "private": true,
-  "packageManager": "pnpm@10.12.4"
+  "packageManager": "pnpm@10.13.1"
 }
@@ -11,6 +11,7 @@ import {
     RCloneStartBackupInput,
     UpdateRCloneRemoteDto,
 } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
+import { FormatService } from '@app/unraid-api/utils/format.service.js';

 vi.mock('got');
 vi.mock('execa');
@@ -55,6 +56,8 @@ describe('RCloneApiService', () => {
     let mockExeca: any;
     let mockPRetry: any;
     let mockExistsSync: any;
+    let mockFormatService: FormatService;
+    let mockCacheManager: any;

     beforeEach(async () => {
         vi.clearAllMocks();
@@ -69,18 +72,67 @@ describe('RCloneApiService', () => {
         mockPRetry = vi.mocked(pRetry.default);
         mockExistsSync = vi.mocked(existsSync);

-        mockGot.post = vi.fn().mockResolvedValue({ body: {} });
-        mockExeca.mockReturnValue({
-            on: vi.fn(),
-            kill: vi.fn(),
-            killed: false,
-            pid: 12345,
-        } as any);
+        mockGot.post = vi.fn().mockImplementation((url: string) => {
+            // Mock the core/pid call to indicate socket is running
+            if (url.includes('core/pid')) {
+                return Promise.resolve({ body: { pid: 12345 } });
+            }
+            return Promise.resolve({ body: {} });
+        });
+        // Mock execa to return a resolved promise for rclone version check
+        mockExeca.mockImplementation((cmd: string, args: string[]) => {
+            if (cmd === 'rclone' && args[0] === 'version') {
+                return Promise.resolve({ stdout: 'rclone v1.67.0', stderr: '', exitCode: 0 } as any);
+            }
+            return {
+                on: vi.fn(),
+                kill: vi.fn(),
+                killed: false,
+                pid: 12345,
+            } as any;
+        });
         mockPRetry.mockResolvedValue(undefined);
-        mockExistsSync.mockReturnValue(false);
+        // Mock socket exists
+        mockExistsSync.mockReturnValue(true);

-        service = new RCloneApiService();
-        await service.onModuleInit();
+        mockFormatService = {
+            formatBytes: vi.fn(),
+            formatDuration: vi.fn(),
+        } as any;
+
+        // Mock RCloneStatusService
+        const mockStatusService = {
+            enhanceStatsWithFormattedFields: vi.fn(),
+            transformStatsToJob: vi.fn(),
+            calculateCombinedStats: vi.fn(),
+            parseActiveJobs: vi.fn(),
+            parseBackupStatus: vi.fn(),
+            parseJobWithStats: vi.fn(),
+            parseAllJobsWithStats: vi.fn(),
+            parseJobsWithStats: vi.fn(),
+            getBackupStatus: vi.fn(),
+        } as any;
+
+        // Mock StreamingJobManager
+        const mockStreamingJobManager = {
+            startJob: vi.fn(),
+            stopJob: vi.fn(),
+            getJobStatus: vi.fn(),
+            getAllJobs: vi.fn(),
+        } as any;
+
+        // Mock cache manager
+        mockCacheManager = {
+            get: vi.fn().mockResolvedValue(null),
+            set: vi.fn().mockResolvedValue(undefined),
+            del: vi.fn().mockResolvedValue(undefined),
+        };
+
+        service = new RCloneApiService(mockStatusService);
+        // Mock the service as initialized without actually running onModuleInit
+        // to avoid the initialization API calls
+        (service as any).initialized = true;
+        (service as any).rcloneBaseUrl = 'http://unix:/tmp/rclone.sock:';
     });

     describe('getProviders', () => {
@@ -248,6 +300,9 @@ describe('RCloneApiService', () => {
             options: { delete_on: 'dst' },
         };
         const mockResponse = { jobid: 'job-123' };
+
+        // Clear previous mock calls and set up fresh mock
+        mockGot.post.mockClear();
         mockGot.post.mockResolvedValue({ body: mockResponse });

         const result = await service.startBackup(input);
@@ -256,11 +311,11 @@ describe('RCloneApiService', () => {
         expect(mockGot.post).toHaveBeenCalledWith(
             'http://unix:/tmp/rclone.sock:/sync/copy',
             expect.objectContaining({
-                json: {
+                json: expect.objectContaining({
                     srcFs: '/source/path',
                     dstFs: 'remote:backup/path',
                     delete_on: 'dst',
-                },
+                }),
             })
         );
     });
@@ -269,8 +324,22 @@ describe('RCloneApiService', () => {
     describe('getJobStatus', () => {
         it('should return job status', async () => {
             const input: GetRCloneJobStatusDto = { jobId: 'job-123' };
-            const mockStatus = { status: 'running', progress: 0.5 };
-            mockGot.post.mockResolvedValue({ body: mockStatus });
+            const mockStatus = { id: 'job-123', status: 'running', progress: 0.5 };
+            mockGot.post.mockImplementation((url: string) => {
+                if (url.includes('core/stats')) {
+                    return Promise.resolve({ body: {} });
+                }
+                if (url.includes('job/status')) {
+                    return Promise.resolve({ body: mockStatus });
+                }
+                return Promise.resolve({ body: {} });
+            });
+
+            // Mock the status service methods
+            const mockStatusService = (service as any).statusService;
+            mockStatusService.enhanceStatsWithFormattedFields = vi.fn().mockReturnValue({});
+            mockStatusService.transformStatsToJob = vi.fn().mockReturnValue(null);
+            mockStatusService.parseJobWithStats = vi.fn().mockReturnValue(mockStatus);

             const result = await service.getJobStatus(input);

@@ -335,7 +404,7 @@ describe('RCloneApiService', () => {
         mockGot.post.mockRejectedValue(httpError);

         await expect(service.getProviders()).rejects.toThrow(
-            'Rclone API Error (config/providers, HTTP 404): Failed to process error response body. Raw body:'
+            'Rclone API Error (config/providers, HTTP 404): Failed to process error response: '
         );
     });

@@ -352,7 +421,7 @@ describe('RCloneApiService', () => {
         mockGot.post.mockRejectedValue(httpError);

         await expect(service.getProviders()).rejects.toThrow(
-            'Rclone API Error (config/providers, HTTP 400): Failed to process error response body. Raw body: invalid json'
+            'Rclone API Error (config/providers, HTTP 400): Failed to process error response: invalid json'
         );
     });

@@ -367,7 +436,7 @@ describe('RCloneApiService', () => {
         mockGot.post.mockRejectedValue('unknown error');

         await expect(service.getProviders()).rejects.toThrow(
-            'Unknown error calling RClone API (config/providers) with params {}: unknown error'
+            'Unknown error calling RClone API (config/providers): unknown error'
        );
    });
 });
@@ -31,6 +31,7 @@ exports[`Returns paths 1`] = `
   "activationBase",
   "webGuiBase",
   "identConfig",
+  "backupBase",
   "activation",
   "boot",
   "webgui",
@@ -124,7 +124,15 @@ export const parseConfig = <T extends Record<string, any>>(
         throw new AppError('Invalid Parameters Passed to ParseConfig');
     }

-    const data: Record<string, any> = parseIni(fileContents);
+    let data: Record<string, any>;
+    try {
+        data = parseIni(fileContents);
+    } catch (error) {
+        throw new AppError(
+            `Failed to parse config file: ${error instanceof Error ? error.message : String(error)}`
+        );
+    }

     // Remove quotes around keys
     const dataWithoutQuoteKeys = Object.fromEntries(
         Object.entries(data).map(([key, value]) => [key.replace(/^"(.+(?="$))"$/, '$1'), value])
@@ -71,6 +71,7 @@ const initialState = {
     ),
     webGuiBase: '/usr/local/emhttp/webGui' as const,
     identConfig: resolvePath(process.env.PATHS_IDENT_CONFIG ?? ('/boot/config/ident.cfg' as const)),
+    backupBase: resolvePath(process.env.PATHS_BACKUP_JOBS ?? ('/boot/config/api/backup/' as const)),
 };

 // Derive asset paths from base paths
@@ -75,7 +75,7 @@ export class AuthService {

         // Now get the updated roles
         const existingRoles = await this.authzService.getRolesForUser(user.id);
-        this.logger.debug(`User ${user.id} has roles: ${existingRoles}`);
+        this.logger.verbose(`User ${user.id} has roles: ${existingRoles}`);

         return user;
     } catch (error: unknown) {
@@ -213,7 +213,7 @@ export class AuthService {
             ...rolesToRemove.map((role) => this.authzService.deleteRoleForUser(userId, role)),
         ]);

-        this.logger.debug(
+        this.logger.verbose(
             `Synced roles for user ${userId}. Added: ${rolesToAdd.join(
                 ','
             )}, Removed: ${rolesToRemove.join(',')}`
@@ -234,7 +234,6 @@ export class AuthService {
      * @returns a service account that represents the user session (i.e. a webgui user).
      */
     async getSessionUser(): Promise<UserAccount> {
-        this.logger.debug('getSessionUser called!');
         return {
             id: '-1',
             description: 'Session receives administrator permissions',
@@ -12,6 +12,8 @@ import { ConfigPersistenceHelper } from '@app/unraid-api/config/persistence.help

 export { type ApiConfig };

+const logger = new Logger('ApiConfig');
+
 const createDefaultConfig = (): ApiConfig => ({
     version: API_VERSION,
     extraOrigins: [],
@@ -33,21 +35,54 @@ export const persistApiConfig = async (config: ApiConfig) => {
 };

 export const loadApiConfig = async () => {
-    const defaultConfig = createDefaultConfig();
-    const apiConfig = new ApiStateConfig<ApiConfig>(
-        {
-            name: 'api',
-            defaultConfig,
-            parse: (data) => data as ApiConfig,
-        },
-        new ConfigPersistenceHelper()
-    );
-    const diskConfig = await apiConfig.parseConfig();
-    return {
-        ...defaultConfig,
-        ...diskConfig,
-        version: API_VERSION,
-    };
+    try {
+        const defaultConfig = createDefaultConfig();
+        const apiConfig = new ApiStateConfig<ApiConfig>(
+            {
+                name: 'api',
+                defaultConfig,
+                parse: (data) => data as ApiConfig,
+            },
+            new ConfigPersistenceHelper()
+        );
+
+        let diskConfig: ApiConfig | undefined;
+        try {
+            diskConfig = await apiConfig.parseConfig();
+        } catch (error) {
+            logger.error('Failed to load API config from disk, using defaults:', error);
+            diskConfig = undefined;
+
+            // Try to overwrite the invalid config with defaults to fix the issue
+            try {
+                const configToWrite = {
+                    ...defaultConfig,
+                    version: API_VERSION,
+                };
+
+                const writeSuccess = await apiConfig.persist(configToWrite);
+                if (writeSuccess) {
+                    logger.log('Successfully overwrote invalid config file with defaults.');
+                } else {
+                    logger.error(
+                        'Failed to overwrite invalid config file. Continuing with defaults in memory only.'
+                    );
+                }
+            } catch (persistError) {
+                logger.error('Error during config file repair:', persistError);
+            }
+        }
+
+        return {
+            ...defaultConfig,
+            ...diskConfig,
+            version: API_VERSION,
+        };
+    } catch (outerError) {
+        // This should never happen, but ensures the config factory never throws
+        logger.error('Critical error in loadApiConfig, using minimal defaults:', outerError);
+        return createDefaultConfig();
+    }
 };

 /**
@@ -81,21 +116,29 @@ export class ApiConfigPersistence {
     }

     async onModuleInit() {
-        if (!(await fileExists(this.filePath))) {
-            this.migrateFromMyServersConfig();
-        }
-        await this.persistenceHelper.persistIfChanged(this.filePath, this.config);
-        this.configService.changes$.pipe(bufferTime(25)).subscribe({
-            next: async (changes) => {
-                if (changes.some((change) => change.path.startsWith('api'))) {
-                    this.logger.verbose(`API Config changed ${JSON.stringify(changes)}`);
-                    await this.persistenceHelper.persistIfChanged(this.filePath, this.config);
-                }
-            },
-            error: (err) => {
-                this.logger.error('Error receiving config changes:', err);
-            },
-        });
+        try {
+            if (!(await fileExists(this.filePath))) {
+                this.migrateFromMyServersConfig();
+            }
+            await this.persistenceHelper.persistIfChanged(this.filePath, this.config);
+            this.configService.changes$.pipe(bufferTime(25)).subscribe({
+                next: async (changes) => {
+                    if (changes.some((change) => change.path.startsWith('api'))) {
+                        this.logger.verbose(`API Config changed ${JSON.stringify(changes)}`);
+                        try {
+                            await this.persistenceHelper.persistIfChanged(this.filePath, this.config);
+                        } catch (persistError) {
+                            this.logger.error('Error persisting config changes:', persistError);
+                        }
+                    }
+                },
+                error: (err) => {
+                    this.logger.error('Error receiving config changes:', err);
+                },
+            });
+        } catch (error) {
+            this.logger.error('Error during API config module initialization:', error);
+        }
     }

     convertLegacyConfig(
@@ -2,9 +2,26 @@ import { ConfigService } from '@nestjs/config';

 import { beforeEach, describe, expect, it, vi } from 'vitest';

-import { ApiConfigPersistence } from '@app/unraid-api/config/api-config.module.js';
+import { fileExists } from '@app/core/utils/files/file-exists.js';
+import { ApiConfigPersistence, loadApiConfig } from '@app/unraid-api/config/api-config.module.js';
 import { ConfigPersistenceHelper } from '@app/unraid-api/config/persistence.helper.js';

+// Mock the core file-exists utility used by ApiStateConfig
+vi.mock('@app/core/utils/files/file-exists.js', () => ({
+    fileExists: vi.fn(),
+}));
+
+// Mock the shared file-exists utility used by ConfigPersistenceHelper
+vi.mock('@unraid/shared/util/file.js', () => ({
+    fileExists: vi.fn(),
+}));
+
+// Mock fs/promises for file I/O operations
+vi.mock('fs/promises', () => ({
+    readFile: vi.fn(),
+    writeFile: vi.fn(),
+}));
+
 describe('ApiConfigPersistence', () => {
     let service: ApiConfigPersistence;
     let configService: ConfigService;
@@ -135,3 +152,127 @@ describe('ApiConfigPersistence', () => {
         });
     });
 });
+
+describe('loadApiConfig', () => {
+    let readFile: any;
+    let writeFile: any;
+
+    beforeEach(async () => {
+        vi.clearAllMocks();
+        // Reset modules to ensure fresh imports
+        vi.resetModules();
+
+        // Get mocked functions
+        const fsMocks = await import('fs/promises');
+        readFile = fsMocks.readFile;
+        writeFile = fsMocks.writeFile;
+    });
+
+    it('should return default config when file does not exist', async () => {
+        vi.mocked(fileExists).mockResolvedValue(false);
+
+        const result = await loadApiConfig();
+
+        expect(result).toEqual({
+            version: expect.any(String),
+            extraOrigins: [],
+            sandbox: false,
+            ssoSubIds: [],
+            plugins: [],
+        });
+    });
+
+    it('should merge disk config with defaults when file exists', async () => {
+        const diskConfig = {
+            extraOrigins: ['https://example.com'],
+            sandbox: true,
+            ssoSubIds: ['sub1', 'sub2'],
+        };
+
+        vi.mocked(fileExists).mockResolvedValue(true);
+        vi.mocked(readFile).mockResolvedValue(JSON.stringify(diskConfig));
+
+        const result = await loadApiConfig();
+
+        expect(result).toEqual({
+            version: expect.any(String),
+            extraOrigins: ['https://example.com'],
+            sandbox: true,
+            ssoSubIds: ['sub1', 'sub2'],
+            plugins: [],
+        });
+    });
+
+    it('should use default config and overwrite file when JSON parsing fails', async () => {
+        const { fileExists: sharedFileExists } = await import('@unraid/shared/util/file.js');
+
+        vi.mocked(fileExists).mockResolvedValue(true);
+        vi.mocked(readFile).mockResolvedValue('{ invalid json }');
+        vi.mocked(sharedFileExists).mockResolvedValue(false); // For persist operation
+        vi.mocked(writeFile).mockResolvedValue(undefined);
+
+        const result = await loadApiConfig();
+
+        // Error logging is handled by NestJS Logger, just verify the config is returned
+        expect(writeFile).toHaveBeenCalled();
+        expect(result).toEqual({
+            version: expect.any(String),
+            extraOrigins: [],
+            sandbox: false,
+            ssoSubIds: [],
+            plugins: [],
+        });
+    });
+
+    it('should handle write failure gracefully when JSON parsing fails', async () => {
+        const { fileExists: sharedFileExists } = await import('@unraid/shared/util/file.js');
+
+        vi.mocked(fileExists).mockResolvedValue(true);
+        vi.mocked(readFile).mockResolvedValue('{ invalid json }');
+        vi.mocked(sharedFileExists).mockResolvedValue(false); // For persist operation
+        vi.mocked(writeFile).mockRejectedValue(new Error('Permission denied'));
+
+        const result = await loadApiConfig();
+
+        // Error logging is handled by NestJS Logger, just verify the config is returned
+        expect(writeFile).toHaveBeenCalled();
+        expect(result).toEqual({
+            version: expect.any(String),
+            extraOrigins: [],
+            sandbox: false,
+            ssoSubIds: [],
+            plugins: [],
+        });
+    });
+
+    it('should use default config when file is empty', async () => {
+        vi.mocked(fileExists).mockResolvedValue(true);
+        vi.mocked(readFile).mockResolvedValue('');
+
+        const result = await loadApiConfig();
+
+        // No error logging expected for empty files
+        expect(result).toEqual({
+            version: expect.any(String),
+            extraOrigins: [],
+            sandbox: false,
+            ssoSubIds: [],
+            plugins: [],
+        });
+    });
+
+    it('should always override version with current API_VERSION', async () => {
+        const diskConfig = {
+            version: 'old-version',
+            extraOrigins: ['https://example.com'],
+        };
+
+        vi.mocked(fileExists).mockResolvedValue(true);
+        vi.mocked(readFile).mockResolvedValue(JSON.stringify(diskConfig));
+
+        const result = await loadApiConfig();
+
+        expect(result.version).not.toBe('old-version');
+        expect(result.version).toBeTruthy();
+    });
+});
api/src/unraid-api/config/factory/api-state.model.test.ts (new file, 364 lines)

@@ -0,0 +1,364 @@
import { Logger } from '@nestjs/common';
import { readFile } from 'node:fs/promises';
import { join } from 'path';

import type { Mock } from 'vitest';
import { beforeEach, describe, expect, it, vi } from 'vitest';

import { fileExists } from '@app/core/utils/files/file-exists.js';
import { ApiStateConfig } from '@app/unraid-api/config/factory/api-state.model.js';
import { ConfigPersistenceHelper } from '@app/unraid-api/config/persistence.helper.js';

vi.mock('node:fs/promises');
vi.mock('@app/core/utils/files/file-exists.js');
vi.mock('@app/environment.js', () => ({
    PATHS_CONFIG_MODULES: '/test/config/path',
}));

describe('ApiStateConfig', () => {
    let mockPersistenceHelper: ConfigPersistenceHelper;
    let mockLogger: Logger;

    interface TestConfig {
        name: string;
        value: number;
        enabled: boolean;
    }

    const defaultConfig: TestConfig = {
        name: 'test',
        value: 42,
        enabled: true,
    };

    const parseFunction = (data: unknown): TestConfig => {
        if (!data || typeof data !== 'object') {
            throw new Error('Invalid config format');
        }
        return data as TestConfig;
    };

    beforeEach(() => {
        vi.clearAllMocks();

        mockPersistenceHelper = {
            persistIfChanged: vi.fn().mockResolvedValue(true),
        } as any;

        mockLogger = {
            log: vi.fn(),
            warn: vi.fn(),
            error: vi.fn(),
            debug: vi.fn(),
        } as any;

        vi.spyOn(Logger.prototype, 'log').mockImplementation(mockLogger.log);
        vi.spyOn(Logger.prototype, 'warn').mockImplementation(mockLogger.warn);
        vi.spyOn(Logger.prototype, 'error').mockImplementation(mockLogger.error);
        vi.spyOn(Logger.prototype, 'debug').mockImplementation(mockLogger.debug);
    });

    describe('constructor', () => {
        it('should initialize with cloned default config', () => {
            const config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );

            expect(config.config).toEqual(defaultConfig);
            expect(config.config).not.toBe(defaultConfig);
        });
    });

    describe('token', () => {
        it('should generate correct token', () => {
            const config = new ApiStateConfig(
                {
                    name: 'my-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );

            expect(config.token).toBe('ApiConfig.my-config');
        });
    });

    describe('file paths', () => {
        it('should generate correct file name', () => {
            const config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );

            expect(config.fileName).toBe('test-config.json');
        });

        it('should generate correct file path', () => {
            const config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );

            expect(config.filePath).toBe(join('/test/config/path', 'test-config.json'));
        });
    });

    describe('parseConfig', () => {
        let config: ApiStateConfig<TestConfig>;

        beforeEach(() => {
            config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );
        });

        it('should return undefined when file does not exist', async () => {
            (fileExists as Mock).mockResolvedValue(false);

            const result = await config.parseConfig();

            expect(result).toBeUndefined();
            expect(readFile).not.toHaveBeenCalled();
        });

        it('should parse valid JSON config', async () => {
            const validConfig = { name: 'custom', value: 100, enabled: false };
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue(JSON.stringify(validConfig));

            const result = await config.parseConfig();

            expect(result).toEqual(validConfig);
            expect(readFile).toHaveBeenCalledWith(config.filePath, 'utf8');
        });

        it('should return undefined for empty file', async () => {
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue('');

            const result = await config.parseConfig();

            expect(result).toBeUndefined();
            expect(mockLogger.warn).toHaveBeenCalledWith(expect.stringContaining('is empty'));
        });

        it('should return undefined for whitespace-only file', async () => {
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue('   \n\t  ');

            const result = await config.parseConfig();

            expect(result).toBeUndefined();
            expect(mockLogger.warn).toHaveBeenCalledWith(expect.stringContaining('is empty'));
        });

        it('should throw error for invalid JSON', async () => {
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue('{ invalid json }');

            await expect(config.parseConfig()).rejects.toThrow();
            expect(mockLogger.error).toHaveBeenCalledWith(
                expect.stringContaining('Failed to parse JSON')
            );
            expect(mockLogger.debug).toHaveBeenCalledWith(expect.stringContaining('{ invalid json }'));
        });

        it('should throw error for incomplete JSON', async () => {
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue('{ "name": "test"');

            await expect(config.parseConfig()).rejects.toThrow();
            expect(mockLogger.error).toHaveBeenCalledWith(
                expect.stringContaining('Failed to parse JSON')
            );
        });

        it('should use custom file path when provided', async () => {
            const customPath = '/custom/path/config.json';
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue(JSON.stringify(defaultConfig));

            await config.parseConfig({ filePath: customPath });

            expect(fileExists).toHaveBeenCalledWith(customPath);
            expect(readFile).toHaveBeenCalledWith(customPath, 'utf8');
        });
    });

    describe('persist', () => {
        let config: ApiStateConfig<TestConfig>;

        beforeEach(() => {
            config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );
        });

        it('should persist current config when no argument provided', async () => {
            const result = await config.persist();

            expect(result).toBe(true);
            expect(mockPersistenceHelper.persistIfChanged).toHaveBeenCalledWith(
                config.filePath,
                defaultConfig
            );
        });

        it('should persist provided config', async () => {
            const customConfig = { name: 'custom', value: 999, enabled: false };

            const result = await config.persist(customConfig);

            expect(result).toBe(true);
            expect(mockPersistenceHelper.persistIfChanged).toHaveBeenCalledWith(
                config.filePath,
                customConfig
            );
        });

        it('should return false and log error on persistence failure', async () => {
            (mockPersistenceHelper.persistIfChanged as Mock).mockResolvedValue(false);

            const result = await config.persist();

            expect(result).toBe(false);
            expect(mockLogger.error).toHaveBeenCalledWith(
                expect.stringContaining('Could not write config')
            );
        });
    });

    describe('load', () => {
        let config: ApiStateConfig<TestConfig>;

        beforeEach(() => {
            config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );
        });

        it('should load config from file when it exists', async () => {
            const savedConfig = { name: 'saved', value: 200, enabled: true };
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue(JSON.stringify(savedConfig));

            await config.load();

            expect(config.config).toEqual(savedConfig);
        });

        it('should create default config when file does not exist', async () => {
            (fileExists as Mock).mockResolvedValue(false);

            await config.load();

            expect(config.config).toEqual(defaultConfig);
            expect(mockLogger.log).toHaveBeenCalledWith(
                expect.stringContaining('Config file does not exist')
            );
            expect(mockPersistenceHelper.persistIfChanged).toHaveBeenCalledWith(
                config.filePath,
                defaultConfig
            );
        });

        it('should not modify config when file is invalid', async () => {
            (fileExists as Mock).mockResolvedValue(true);
            (readFile as Mock).mockResolvedValue('invalid json');

            await config.load();

            expect(config.config).toEqual(defaultConfig);
            expect(mockLogger.warn).toHaveBeenCalledWith(
                expect.any(Error),
                expect.stringContaining('is invalid')
            );
        });

        it('should not throw even when persist fails', async () => {
            (fileExists as Mock).mockResolvedValue(false);
            (mockPersistenceHelper.persistIfChanged as Mock).mockResolvedValue(false);

            await expect(config.load()).resolves.not.toThrow();

            expect(config.config).toEqual(defaultConfig);
        });
    });

    describe('update', () => {
        let config: ApiStateConfig<TestConfig>;

        beforeEach(() => {
            config = new ApiStateConfig(
                {
                    name: 'test-config',
                    defaultConfig,
                    parse: parseFunction,
                },
                mockPersistenceHelper
            );
        });

        it('should update config with partial values', () => {
            config.update({ value: 123 });

            expect(config.config).toEqual({
                name: 'test',
                value: 123,
                enabled: true,
            });
        });

        it('should return self for chaining', () => {
            const result = config.update({ enabled: false });

            expect(result).toBe(config);
        });

        it('should validate updated config through parse function', () => {
            const badParseFunction = vi.fn().mockImplementation(() => {
                throw new Error('Validation failed');
            });

            const strictConfig = new ApiStateConfig(
                {
                    name: 'strict-config',
                    defaultConfig,
                    parse: badParseFunction,
                },
                mockPersistenceHelper
            );

            expect(() => strictConfig.update({ value: -1 })).toThrow('Validation failed');
        });
    });
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -56,13 +56,11 @@ export class ApiStateConfig<T> {
      * @returns True if the config was written successfully, false otherwise.
      */
     async persist(config = this.#config) {
-        try {
-            await this.persistenceHelper.persistIfChanged(this.filePath, config);
-            return true;
-        } catch (error) {
-            this.logger.error(error, `Could not write config to ${this.filePath}.`);
-            return false;
-        }
+        const success = await this.persistenceHelper.persistIfChanged(this.filePath, config);
+        if (!success) {
+            this.logger.error(`Could not write config to ${this.filePath}.`);
+        }
+        return success;
     }
 
     /**
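Note the contract change here: write failures now surface as a `false` return rather than a thrown error. One wrinkle worth flagging: `persistIfChanged()` also returns `false` when the file is already up to date, so this version logs "Could not write config" for no-op persists as well. A minimal sketch of the caller's view, assuming an already-constructed `ApiStateConfig` instance (hypothetical name):

// Hedged sketch; `stateConfig` is a hypothetical ApiStateConfig<T> instance.
const saved = await stateConfig.persist();
if (!saved) {
    // Either nothing needed writing or the write failed; persist() already
    // logged the error. Distinguishing the two would need a richer result type.
}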
@@ -76,8 +74,23 @@ export class ApiStateConfig<T> {
         const { filePath = this.filePath } = opts;
         if (!(await fileExists(filePath))) return undefined;
 
-        const rawConfig = JSON.parse(await readFile(filePath, 'utf8'));
-        return this.options.parse(rawConfig);
+        const fileContent = await readFile(filePath, 'utf8');
+
+        if (!fileContent || fileContent.trim() === '') {
+            this.logger.warn(`Config file '${filePath}' is empty.`);
+            return undefined;
+        }
+
+        try {
+            const rawConfig = JSON.parse(fileContent);
+            return this.options.parse(rawConfig);
+        } catch (error) {
+            this.logger.error(
+                `Failed to parse JSON from '${filePath}': ${error instanceof Error ? error.message : String(error)}`
+            );
+            this.logger.debug(`File content: ${fileContent.substring(0, 100)}...`);
+            throw error;
+        }
     }
 
     /**
@@ -12,24 +12,59 @@ export class ConfigPersistenceHelper {
      *
      * @param filePath - The path to the config file.
      * @param data - The data to persist.
-     * @returns `true` if the config was persisted, `false` otherwise.
+     * @returns `true` if the config was persisted, `false` if no changes were needed or if persistence failed.
      *
-     * @throws {Error} if the config file does not exist or is unreadable.
-     * @throws {Error} if the config file is not valid JSON.
-     * @throws {Error} if given data is not JSON (de)serializable.
-     * @throws {Error} if the config file is not writable.
+     * This method is designed to never throw errors. If the existing file is corrupted or unreadable,
+     * it will attempt to overwrite it with the new data. If write operations fail, it returns false
+     * but does not crash the application.
      */
     async persistIfChanged(filePath: string, data: unknown): Promise<boolean> {
         if (!(await fileExists(filePath))) {
-            await writeFile(filePath, JSON.stringify(data ?? {}, null, 2));
-            return true;
+            try {
+                const jsonString = JSON.stringify(data ?? {}, null, 2);
+                await writeFile(filePath, jsonString);
+                return true;
+            } catch (error) {
+                // JSON serialization or write failed, but don't crash - just return false
+                return false;
+            }
         }
-        const currentData = JSON.parse(await readFile(filePath, 'utf8'));
-        const stagedData = JSON.parse(JSON.stringify(data));
+
+        let currentData: unknown;
+        try {
+            const fileContent = await readFile(filePath, 'utf8');
+            currentData = JSON.parse(fileContent);
+        } catch (error) {
+            // If existing file is corrupted, treat it as if it doesn't exist
+            // and write the new data
+            try {
+                const jsonString = JSON.stringify(data ?? {}, null, 2);
+                await writeFile(filePath, jsonString);
+                return true;
+            } catch (writeError) {
+                // JSON serialization or write failed, but don't crash - just return false
+                return false;
+            }
+        }
+
+        let stagedData: unknown;
+        try {
+            stagedData = JSON.parse(JSON.stringify(data));
+        } catch (error) {
+            // If data can't be serialized to JSON, we can't persist it
+            return false;
+        }
 
         if (isEqual(currentData, stagedData)) {
             return false;
         }
-        await writeFile(filePath, JSON.stringify(stagedData, null, 2));
-        return true;
+
+        try {
+            await writeFile(filePath, JSON.stringify(stagedData, null, 2));
+            return true;
+        } catch (error) {
+            // Write failed, but don't crash - just return false
+            return false;
+        }
     }
 }
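Taken together, every path through the helper now resolves to a boolean. A small sketch of the resulting contract; the bare instantiation is for illustration only (in the API the helper is provided via Nest DI), and the paths/values are hypothetical:

// Hedged sketch of the new contract.
const helper = new ConfigPersistenceHelper();

await helper.persistIfChanged('/tmp/example.json', { a: 1 });         // true: file missing, written fresh
await helper.persistIfChanged('/tmp/example.json', { a: 1 });         // false: identical content, no write
await helper.persistIfChanged('/tmp/example.json', { a: BigInt(1) }); // false: JSON.stringify throws on BigInt,
                                                                      // caught internally instead of propagating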
@@ -0,0 +1,600 @@
import { forwardRef, Inject, Injectable, Logger, OnModuleInit } from '@nestjs/common';
import { SchedulerRegistry } from '@nestjs/schedule';
import { existsSync } from 'fs';
import { readFile, writeFile } from 'fs/promises';
import { join } from 'path';

import { CronJob } from 'cron';
import { v4 as uuidv4 } from 'uuid';

import { getters } from '@app/store/index.js';
import {
    BackupJobConfig,
    CreateBackupJobConfigInput,
    UpdateBackupJobConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
import { getBackupJobGroupId } from '@app/unraid-api/graph/resolvers/backup/backup.utils.js';
import {
    DestinationConfigInput,
    DestinationType,
    RcloneDestinationConfig,
} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js';
import {
    FlashPreprocessConfig,
    RawBackupConfig,
    ScriptPreprocessConfig,
    SourceConfigInput,
    SourceType,
    ZfsPreprocessConfig,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';

const JOB_GROUP_PREFIX = 'backup-';

@Injectable()
export class BackupConfigService implements OnModuleInit {
    private readonly logger = new Logger(BackupConfigService.name);
    private readonly configPath: string;
    private configs: Map<string, BackupJobConfig> = new Map();

    constructor(
        private readonly rcloneService: RCloneService,
        private readonly schedulerRegistry: SchedulerRegistry,
        @Inject(forwardRef(() => BackupOrchestrationService))
        private readonly backupOrchestrationService: BackupOrchestrationService
    ) {
        const paths = getters.paths();
        this.configPath = join(paths.backupBase, 'backup-jobs.json');
    }

    async onModuleInit(): Promise<void> {
        await this.loadConfigs();
    }

    private transformSourceConfigInput(
        input: SourceConfigInput
    ): ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig {
        switch (input.type) {
            case SourceType.ZFS: {
                if (!input.zfsConfig) {
                    throw new Error('ZFS configuration is required when type is ZFS');
                }
                const zfsConfig = new ZfsPreprocessConfig();
                zfsConfig.label = input.zfsConfig.label || 'ZFS backup';
                zfsConfig.poolName = input.zfsConfig.poolName;
                zfsConfig.datasetName = input.zfsConfig.datasetName;
                zfsConfig.snapshotPrefix = input.zfsConfig.snapshotPrefix;
                zfsConfig.cleanupSnapshots = input.zfsConfig.cleanupSnapshots ?? true;
                zfsConfig.retainSnapshots = input.zfsConfig.retainSnapshots;
                return zfsConfig;
            }

            case SourceType.FLASH: {
                if (!input.flashConfig) {
                    throw new Error('Flash configuration is required when type is FLASH');
                }
                const flashConfig = new FlashPreprocessConfig();
                flashConfig.label = input.flashConfig.label || 'Flash drive backup';
                flashConfig.flashPath = input.flashConfig.flashPath || '/boot';
                flashConfig.includeGitHistory = input.flashConfig.includeGitHistory ?? true;
                flashConfig.additionalPaths = input.flashConfig.additionalPaths || [];
                return flashConfig;
            }

            case SourceType.SCRIPT: {
                if (!input.scriptConfig) {
                    throw new Error('Script configuration is required when type is SCRIPT');
                }
                const scriptConfig = new ScriptPreprocessConfig();
                scriptConfig.label = input.scriptConfig.label || 'Script backup';
                scriptConfig.scriptPath = input.scriptConfig.scriptPath;
                scriptConfig.scriptArgs = input.scriptConfig.scriptArgs || [];
                scriptConfig.workingDirectory = input.scriptConfig.workingDirectory;
                scriptConfig.environment = input.scriptConfig.environment;
                scriptConfig.outputPath = input.scriptConfig.outputPath;
                return scriptConfig;
            }

            case SourceType.RAW: {
                if (!input.rawConfig) {
                    throw new Error('Raw configuration is required when type is RAW');
                }
                const rawConfig = new RawBackupConfig();
                rawConfig.label = input.rawConfig.label || 'Raw file backup';
                rawConfig.sourcePath = input.rawConfig.sourcePath;
                rawConfig.excludePatterns = input.rawConfig.excludePatterns || [];
                rawConfig.includePatterns = input.rawConfig.includePatterns || [];
                return rawConfig;
            }

            default:
                throw new Error(`Unsupported source type: ${input.type}`);
        }
    }

    private transformDestinationConfigInput(input: DestinationConfigInput): RcloneDestinationConfig {
        switch (input.type) {
            case DestinationType.RCLONE: {
                if (!input.rcloneConfig) {
                    throw new Error('RClone configuration is required when type is RCLONE');
                }
                const rcloneConfig = new RcloneDestinationConfig();
                rcloneConfig.type = 'RCLONE';
                rcloneConfig.remoteName = input.rcloneConfig.remoteName;
                rcloneConfig.destinationPath = input.rcloneConfig.destinationPath;
                rcloneConfig.rcloneOptions = input.rcloneConfig.rcloneOptions;
                return rcloneConfig;
            }

            default:
                throw new Error(`Unsupported destination type: ${input.type}`);
        }
    }

    async createBackupJobConfig(input: CreateBackupJobConfigInput): Promise<BackupJobConfig> {
        const id = uuidv4();
        const now = new Date().toISOString();

        // Validate input sourceConfig and destinationConfig presence
        if (!input.sourceConfig) {
            this.logger.error('Source configuration (sourceConfig) is required.');
            throw new Error('Source configuration (sourceConfig) is required.');
        }
        if (!input.destinationConfig) {
            this.logger.error('Destination configuration (destinationConfig) is required.');
            throw new Error('Destination configuration (destinationConfig) is required.');
        }

        // Extract sourceType and destinationType from the respective config objects
        const sourceType = input.sourceConfig.type;
        const destinationType = input.destinationConfig.type;

        if (!sourceType) {
            this.logger.error("Source configuration must include a valid 'type' property.");
            throw new Error("Source configuration must include a valid 'type' property.");
        }
        if (!destinationType) {
            this.logger.error("Destination configuration must include a valid 'type' property.");
            throw new Error("Destination configuration must include a valid 'type' property.");
        }

        // Transform the source config input into the appropriate union member
        const transformedSourceConfig = this.transformSourceConfigInput(input.sourceConfig);

        // Transform the destination config input into the appropriate union member
        const transformedDestinationConfig = this.transformDestinationConfigInput(
            input.destinationConfig
        );

        const config: BackupJobConfig = {
            id,
            name: input.name,
            sourceType,
            destinationType,
            schedule: input.schedule || '0 2 * * *',
            enabled: input.enabled,
            sourceConfig: transformedSourceConfig,
            destinationConfig: transformedDestinationConfig,
            createdAt: now,
            updatedAt: now,
        };

        this.configs.set(id, config);
        await this.saveConfigs();

        if (config.enabled) {
            this.scheduleJob(config);
        }

        return config;
    }

    async updateBackupJobConfig(
        id: string,
        input: UpdateBackupJobConfigInput
    ): Promise<BackupJobConfig | null> {
        this.logger.debug(
            `[updateBackupJobConfig] Called with ID: ${id}, Input: ${JSON.stringify(input)}`
        );
        const existing = this.configs.get(id);
        if (!existing) {
            this.logger.warn(`[updateBackupJobConfig] No existing config found for ID: ${id}`);
            return null;
        }
        this.logger.debug(
            `[updateBackupJobConfig] Existing config for ID ${id}: ${JSON.stringify(existing)}`
        );

        // Handle sourceConfig update
        let updatedSourceConfig = existing.sourceConfig;
        let updatedSourceType = existing.sourceType;
        if (input.sourceConfig) {
            const inputSourceType = input.sourceConfig.type;
            if (!inputSourceType) {
                this.logger.warn(
                    `[updateBackupJobConfig] Source config update for ID ${id} is missing 'type'. Update skipped for sourceConfig.`
                );
            } else {
                // Transform the input into the appropriate union member
                updatedSourceConfig = this.transformSourceConfigInput(input.sourceConfig);
                updatedSourceType = inputSourceType;
                this.logger.debug(`[updateBackupJobConfig] Transformed sourceConfig for ${id}.`);
            }
        }

        // Handle destinationConfig update
        let updatedDestinationConfig = existing.destinationConfig;
        let updatedDestinationType = existing.destinationType;
        if (input.destinationConfig) {
            const inputDestinationType = input.destinationConfig.type;
            if (!inputDestinationType) {
                this.logger.warn(
                    `[updateBackupJobConfig] Destination config update for ID ${id} is missing 'type'. Update skipped for destinationConfig.`
                );
            } else {
                // Transform the input into the appropriate union member
                updatedDestinationConfig = this.transformDestinationConfigInput(input.destinationConfig);
                updatedDestinationType = inputDestinationType;
                this.logger.debug(`[updateBackupJobConfig] Updated destinationConfig for ${id}.`);
            }
        }

        const updated: BackupJobConfig = {
            ...existing,
            name: input.name ?? existing.name,
            schedule: input.schedule ?? existing.schedule,
            enabled: input.enabled ?? existing.enabled,
            sourceType: updatedSourceType,
            destinationType: updatedDestinationType,
            sourceConfig: updatedSourceConfig,
            destinationConfig: updatedDestinationConfig,
            updatedAt: new Date().toISOString(),
            lastRunAt: input.lastRunAt !== undefined ? input.lastRunAt : existing.lastRunAt,
            lastRunStatus:
                input.lastRunStatus !== undefined ? input.lastRunStatus : existing.lastRunStatus,
        };

        this.logger.debug(
            `[updateBackupJobConfig] Updated object for ID ${id} (before set): ${JSON.stringify(updated)}`
        );

        this.configs.set(id, updated);
        await this.saveConfigs();
        this.logger.debug(`[updateBackupJobConfig] Configs saved for ID: ${id}`);

        this.unscheduleJob(id);
        if (updated.enabled) {
            this.scheduleJob(updated);
        }

        return updated;
    }

    async deleteBackupJobConfig(id: string): Promise<boolean> {
        const config = this.configs.get(id);
        if (!config) return false;

        this.unscheduleJob(id);
        this.configs.delete(id);
        await this.saveConfigs();
        return true;
    }

    async getBackupJobConfig(id: string): Promise<BackupJobConfig | null> {
        this.logger.debug(`[getBackupJobConfig] Called for ID: ${id}`);
        const config = this.configs.get(id);
        if (config) {
            this.logger.debug(
                `[getBackupJobConfig] Found config for ID ${id}: ${JSON.stringify(config)}`
            );
        } else {
            this.logger.warn(`[getBackupJobConfig] No config found for ID: ${id}`);
        }
        return config || null;
    }

    async getAllBackupJobConfigs(): Promise<BackupJobConfig[]> {
        return Array.from(this.configs.values());
    }

    private transformPlainObjectToSourceConfig(
        obj: any,
        sourceType: SourceType
    ): ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig {
        switch (sourceType) {
            case SourceType.ZFS: {
                const zfsConfig = new ZfsPreprocessConfig();
                Object.assign(zfsConfig, obj);
                return zfsConfig;
            }
            case SourceType.FLASH: {
                const flashConfig = new FlashPreprocessConfig();
                Object.assign(flashConfig, obj);
                return flashConfig;
            }
            case SourceType.SCRIPT: {
                const scriptConfig = new ScriptPreprocessConfig();
                Object.assign(scriptConfig, obj);
                return scriptConfig;
            }
            case SourceType.RAW: {
                const rawConfig = new RawBackupConfig();
                Object.assign(rawConfig, obj);
                return rawConfig;
            }
            default:
                this.logger.error(
                    `Unsupported source type encountered during plain object transformation: ${sourceType as string}`
                );
                throw new Error(`Unsupported source type: ${sourceType as string}`);
        }
    }

    private transformPlainObjectToDestinationConfig(
        obj: any,
        destinationType: DestinationType
    ): RcloneDestinationConfig {
        switch (destinationType) {
            case DestinationType.RCLONE: {
                const rcloneConfig = new RcloneDestinationConfig();
                Object.assign(rcloneConfig, obj);
                return rcloneConfig;
            }

            default:
                throw new Error(`Unsupported destination type: ${destinationType}`);
        }
    }

    private async executeBackupJob(config: BackupJobConfig): Promise<void> {
        this.logger.log(
            `Executing backup job via BackupOrchestrationService: ${config.name} (ID: ${config.id})`
        );

        // Prepare updates, currentJobId will be set after job starts
        const updatesForInMemoryConfig: Partial<BackupJobConfig> = {
            lastRunAt: new Date().toISOString(),
            lastRunStatus: 'Starting...',
            currentJobId: undefined, // Initialize
        };

        try {
            // Delegate to the BackupOrchestrationService and get the jobId
            // IMPORTANT: This assumes backupOrchestrationService.executeBackupJob is modified to return the jobId string
            const jobId = await this.backupOrchestrationService.executeBackupJob(config, config.id);

            if (jobId) {
                updatesForInMemoryConfig.currentJobId = jobId;
                this.logger.log(
                    `Backup job ${config.name} (ID: ${config.id}) initiated by BackupOrchestrationService with Job ID: ${jobId}.`
                );
            } else {
                this.logger.warn(
                    `BackupOrchestrationService.executeBackupJob did not return a jobId for config ${config.id}. currentJobId will not be set.`
                );
            }

            // Update the in-memory config with all changes including currentJobId
            const currentConfig = this.configs.get(config.id);
            if (currentConfig) {
                this.configs.set(config.id, {
                    ...currentConfig,
                    ...updatesForInMemoryConfig,
                });
            } else {
                this.logger.warn(
                    `Config ${config.id} not found in memory map after starting job. State may be inconsistent.`
                );
                // Fallback: attempt to set it anyway, though this indicates a potential issue
                this.configs.set(config.id, {
                    ...config, // Use the passed config as a base
                    ...updatesForInMemoryConfig,
                });
            }

            // Persist the updated status (lastRunAt, lastRunStatus) to backup-jobs.json.
            // currentJobId is deliberately in-memory only: saveConfigs() strips it before
            // writing, so the GraphQL layer can see the running job while the on-disk
            // config stays free of transient state.
            await this.saveConfigs();
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(
                `Backup job ${config.name} (ID: ${config.id}) failed during orchestration: ${errorMessage}`,
                (error as Error).stack
            );

            const currentConfig = this.configs.get(config.id);
            const failedConfigUpdate = {
                lastRunAt: new Date().toISOString(),
                lastRunStatus: `Failed: ${errorMessage}`,
                currentJobId: undefined, // Clear currentJobId on failure
            };

            if (currentConfig) {
                this.configs.set(config.id, {
                    ...currentConfig,
                    ...failedConfigUpdate,
                });
            } else {
                // If not in map, use passed config as base
                this.configs.set(config.id, {
                    ...config,
                    ...failedConfigUpdate,
                });
            }
            await this.saveConfigs(); // Save updated status, currentJobId will be cleared
            throw error;
        }
    }

    // Called when a job completes or is stopped, to record its final status
    public async handleJobCompletion(
        configId: string,
        finalStatus: string,
        jobId?: string
    ): Promise<void> {
        const config = this.configs.get(configId);
        if (config) {
            this.logger.log(
                `Handling job completion for config ${configId}, job ${jobId}. Final status: ${finalStatus}`
            );

            const updates: Partial<BackupJobConfig> = {
                lastRunStatus: finalStatus,
                lastRunAt: new Date().toISOString(), // Update lastRunAt to completion time
            };

            // Only clear currentJobId if it matches the completed/stopped job
            if (config.currentJobId === jobId) {
                updates.currentJobId = undefined;
            } else if (jobId && config.currentJobId) {
                this.logger.warn(
                    `Completed job ID ${jobId} does not match currentJobId ${config.currentJobId} for config ${configId}. currentJobId not cleared.`
                );
            }

            this.configs.set(configId, {
                ...config,
                ...updates,
            });

            // currentJobId will be cleared or remain as is in memory.
            // saveConfigs will persist this state.
            await this.saveConfigs();
        } else {
            this.logger.warn(`Config ${configId} not found when trying to handle job completion.`);
        }
    }

    private scheduleJob(config: BackupJobConfig): void {
        try {
            const job = new CronJob(
                config.schedule,
                () => this.executeBackupJob(config),
                null,
                false,
                'UTC'
            );

            this.schedulerRegistry.addCronJob(getBackupJobGroupId(config.id), job);
            job.start();
            this.logger.log(`Scheduled backup job: ${config.name} with schedule: ${config.schedule}`);
        } catch (error) {
            this.logger.error(`Failed to schedule backup job ${config.name}:`, error);
        }
    }

    private unscheduleJob(id: string): void {
        try {
            const jobName = getBackupJobGroupId(id);
            if (this.schedulerRegistry.doesExist('cron', jobName)) {
                this.schedulerRegistry.deleteCronJob(jobName);
                this.logger.log(`Unscheduled backup job: ${id}`);
            } else {
                this.logger.debug(`No existing cron job found to unschedule for backup job: ${id}`);
            }
        } catch (error) {
            this.logger.error(`Failed to unschedule backup job ${id}:`, error);
        }
    }

    private async loadConfigs(): Promise<void> {
        try {
            if (existsSync(this.configPath)) {
                const data = await readFile(this.configPath, 'utf-8');
                const configs: BackupJobConfig[] = JSON.parse(data);

                // First, unschedule any existing jobs before clearing the config map
                this.configs.forEach((config) => {
                    if (config.enabled) {
                        this.unscheduleJob(config.id);
                    }
                });

                this.configs.clear();
                configs.forEach((config) => {
                    // Transform plain objects back into class instances
                    const transformedConfig = {
                        ...config,
                        sourceConfig: this.transformPlainObjectToSourceConfig(
                            config.sourceConfig,
                            config.sourceType
                        ),
                        destinationConfig: this.transformPlainObjectToDestinationConfig(
                            config.destinationConfig,
                            config.destinationType
                        ),
                    };

                    this.configs.set(config.id, transformedConfig);
                    if (transformedConfig.enabled) {
                        this.scheduleJob(transformedConfig);
                    }
                });

                this.logger.log(`Loaded ${configs.length} backup job configurations`);
            }
        } catch (error) {
            this.logger.error('Failed to load backup configurations:', error);
        }
    }

    private async saveConfigs(): Promise<void> {
        try {
            // Create a shallow copy of each config for saving, stripping currentJobId
            const configsToSave: BackupJobConfig[] = [];
            for (const config of this.configs.values()) {
                const { currentJobId, ...restOfConfig } = config; // Destructure to remove currentJobId
                configsToSave.push(restOfConfig as BackupJobConfig); // Cast needed if TS complains
            }
            await writeFile(this.configPath, JSON.stringify(configsToSave, null, 2));
        } catch (error) {
            this.logger.error('Failed to save backup configurations:', error);
        }
    }
}
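To make the persistence split concrete: a hedged sketch of the in-memory record versus what `saveConfigs()` writes to backup-jobs.json, for one hypothetical enabled job (all field values illustrative only):

// In memory (what the GraphQL layer reads):
const inMemory = {
    id: 'c0ffee',                     // illustrative id
    name: 'Nightly flash backup',
    schedule: '0 2 * * *',
    enabled: true,
    lastRunStatus: 'Starting...',
    currentJobId: 'backup-c0ffee',    // transient, in-memory only
};

// On disk (currentJobId stripped, mirroring the destructure in saveConfigs()):
const { currentJobId, ...persisted } = inMemory;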
@@ -0,0 +1,313 @@
import { Logger } from '@nestjs/common';
import { Args, ResolveField, Resolver } from '@nestjs/graphql';

import { Resource } from '@unraid/shared/graphql.model';
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar';
import {
    AuthActionVerb,
    AuthPossession,
    UsePermissions,
} from '@unraid/shared/use-permissions.directive.js';

import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
import {
    BackupJobConfig,
    BackupStatus,
    CreateBackupJobConfigInput,
    InitiateBackupInput,
    UpdateBackupJobConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js';
import { BackupMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js';
import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';

@Resolver(() => BackupMutations)
export class BackupMutationsResolver {
    private readonly logger = new Logger(BackupMutationsResolver.name);

    constructor(
        private readonly backupConfigService: BackupConfigService,
        private readonly rcloneService: RCloneService,
        private readonly backupOrchestrationService: BackupOrchestrationService
    ) {}

    private async executeBackup(
        sourcePath: string,
        remoteName: string,
        destinationPath: string,
        options: Record<string, any> = {},
        configId?: string
    ): Promise<BackupStatus> {
        try {
            this.logger.log(`Executing backup: ${sourcePath} -> ${remoteName}:${destinationPath}`);

            // Create a temporary config for the orchestration service
            const tempConfig: BackupJobConfig = {
                id: configId || `temp-${Date.now()}`,
                name: `Manual backup to ${remoteName}`,
                sourceType: 'raw' as any,
                destinationType: 'rclone' as any,
                schedule: '',
                enabled: true,
                sourceConfig: {
                    type: 'raw',
                    sourcePath: sourcePath,
                } as any,
                destinationConfig: {
                    type: 'rclone',
                    remoteName: remoteName,
                    destinationPath: destinationPath,
                    options: options,
                } as any,
                createdAt: new Date().toISOString(),
                updatedAt: new Date().toISOString(),
            };

            const jobId = tempConfig.id;

            // Use the orchestration service for execution
            await this.backupOrchestrationService.executeBackupJob(tempConfig, jobId);

            this.logger.log(`Backup job initiated successfully with ID: ${jobId}`);

            return {
                status: 'Backup initiated successfully',
                jobId: jobId,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(
                `Failed to execute backup: ${errorMessage}`,
                error instanceof Error ? error.stack : undefined
            );

            return {
                status: `Failed to initiate backup: ${errorMessage}`,
                jobId: undefined,
            };
        }
    }

    @ResolveField(() => BackupJobConfig, {
        description: 'Create a new backup job configuration',
    })
    @UsePermissions({
        action: AuthActionVerb.CREATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async createBackupJobConfig(
        @Args('input') input: CreateBackupJobConfigInput
    ): Promise<BackupJobConfig> {
        return this.backupConfigService.createBackupJobConfig(input);
    }

    @ResolveField(() => BackupJobConfig, {
        description: 'Update a backup job configuration',
        nullable: true,
    })
    @UsePermissions({
        action: AuthActionVerb.UPDATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async updateBackupJobConfig(
        @Args('id', { type: () => PrefixedID }) id: string,
        @Args('input') input: UpdateBackupJobConfigInput
    ): Promise<BackupJobConfig | null> {
        return this.backupConfigService.updateBackupJobConfig(id, input);
    }

    @ResolveField(() => Boolean, {
        description: 'Delete a backup job configuration',
    })
    @UsePermissions({
        action: AuthActionVerb.DELETE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async deleteBackupJobConfig(@Args('id', { type: () => PrefixedID }) id: string): Promise<boolean> {
        return this.backupConfigService.deleteBackupJobConfig(id);
    }

    @ResolveField(() => BackupStatus, {
        description: 'Initiates a backup using a configured remote.',
    })
    @UsePermissions({
        action: AuthActionVerb.CREATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async initiateBackup(@Args('input') input: InitiateBackupInput): Promise<BackupStatus> {
        return this.executeBackup(
            input.sourcePath,
            input.remoteName,
            input.destinationPath,
            input.options || {}
        );
    }

    @ResolveField(() => BackupJobConfig, {
        description: 'Toggle a backup job configuration enabled/disabled',
        nullable: true,
    })
    @UsePermissions({
        action: AuthActionVerb.UPDATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async toggleJobConfig(
        @Args('id', { type: () => PrefixedID }) id: string
    ): Promise<BackupJobConfig | null> {
        const existing = await this.backupConfigService.getBackupJobConfig(id);
        if (!existing) return null;

        return this.backupConfigService.updateBackupJobConfig(id, {
            enabled: !existing.enabled,
        });
    }

    @ResolveField(() => BackupStatus, {
        description: 'Manually trigger a backup job using existing configuration',
    })
    @UsePermissions({
        action: AuthActionVerb.CREATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async triggerJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<BackupStatus> {
        const config = await this.backupConfigService.getBackupJobConfig(id);
        if (!config) {
            return {
                status: 'Failed to trigger backup: Configuration not found',
                jobId: undefined,
            };
        }

        try {
            // Use the orchestration service to execute the backup job
            await this.backupOrchestrationService.executeBackupJob(config, config.id);

            // Update the config with job start information
            await this.backupConfigService.updateBackupJobConfig(id, {
                lastRunStatus: `Started with job ID: ${config.id}`,
                lastRunAt: new Date().toISOString(),
            });

            return {
                status: 'Backup job triggered successfully',
                jobId: config.id,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to trigger backup job ${id}: ${errorMessage}`);

            await this.backupConfigService.updateBackupJobConfig(id, {
                lastRunStatus: `Failed: ${errorMessage}`,
                lastRunAt: new Date().toISOString(),
            });

            return {
                status: `Failed to trigger backup: ${errorMessage}`,
                jobId: undefined,
            };
        }
    }

    @ResolveField(() => BackupStatus, {
        description: 'Stop all running backup jobs',
    })
    @UsePermissions({
        action: AuthActionVerb.DELETE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async stopAllBackupJobs(): Promise<BackupStatus> {
        try {
            const result = await this.rcloneService['rcloneApiService'].stopAllJobs();
            const stoppedCount = result.stopped.length;
            const errorCount = result.errors.length;

            if (stoppedCount > 0) {
                this.logger.log(`Stopped ${stoppedCount} backup jobs`);
            }

            if (errorCount > 0) {
                this.logger.warn(`Failed operations on ${errorCount} jobs: ${result.errors.join(', ')}`);
            }

            return {
                status: `Stopped ${stoppedCount} jobs${errorCount > 0 ? `, ${errorCount} errors` : ''}`,
                jobId: undefined,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to stop backup jobs: ${errorMessage}`);
            return {
                status: `Failed to stop backup jobs: ${errorMessage}`,
                jobId: undefined,
            };
        }
    }

    @ResolveField(() => BackupStatus, {
        description: 'Stop a specific backup job',
    })
    @UsePermissions({
        action: AuthActionVerb.DELETE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async stopBackupJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<BackupStatus> {
        try {
            const result = await this.rcloneService['rcloneApiService'].stopJob(id);
            const stoppedCount = result.stopped.length;
            const errorCount = result.errors.length;

            if (stoppedCount > 0) {
                this.logger.log(`Stopped backup job: ${id}`);
            }

            if (errorCount > 0) {
                this.logger.warn(`Failed to stop job ${id}: ${result.errors.join(', ')}`);
            }

            return {
                status: stoppedCount > 0 ? `Stopped job ${id}` : `Failed to stop job ${id}`,
                jobId: stoppedCount > 0 ? id : undefined,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to stop backup job ${id}: ${errorMessage}`);
            return {
                status: `Failed to stop backup job: ${errorMessage}`,
                jobId: undefined,
            };
        }
    }

    @ResolveField(() => BackupStatus, {
        description: 'Forget all finished backup jobs to clean up the job list',
    })
    @UsePermissions({
        action: AuthActionVerb.DELETE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async forgetFinishedBackupJobs(): Promise<BackupStatus> {
        try {
            this.logger.log('Forgetting finished backup jobs is handled automatically by RClone');
            return {
                status: 'Finished jobs are automatically cleaned up by RClone',
                jobId: undefined,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to forget finished backup jobs: ${errorMessage}`);
            return {
                status: `Failed to forget finished backup jobs: ${errorMessage}`,
                jobId: undefined,
            };
        }
    }
}
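As a rough illustration of how these resolve fields are reached from a client: the exact mutation root depends on how `BackupMutations` is wired in mutation.model.js, so the `backup` selection path below is an assumption, not confirmed by this diff.

// Hedged sketch: assumes the BackupMutations root is exposed as `backup` on Mutation.
const TRIGGER_JOB = /* GraphQL */ `
    mutation TriggerBackupJob($id: PrefixedID!) {
        backup {
            triggerJob(id: $id) {
                status
                jobId
            }
        }
    }
`;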
api/src/unraid-api/graph/resolvers/backup/backup.model.ts (new file)
@@ -0,0 +1,226 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { type Layout } from '@jsonforms/core';
import { Node } from '@unraid/shared/graphql.model.js';
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
import {
    IsBoolean,
    IsNotEmpty,
    IsObject,
    IsOptional,
    IsString,
    Matches,
    ValidateIf,
    ValidateNested,
} from 'class-validator';
import { DateTimeISOResolver, GraphQLJSON } from 'graphql-scalars';

import {
    DestinationConfigInput,
    DestinationConfigInputUnion,
    DestinationConfigUnion,
    DestinationType,
} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
import { JobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import {
    SourceConfigInput,
    SourceConfigInputUnion,
    SourceConfigUnion,
    SourceType,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { DataSlice } from '@app/unraid-api/types/json-forms.js';

@ObjectType({
    implements: () => Node,
})
export class Backup extends Node {
    @Field(() => [JobStatus])
    jobs!: JobStatus[];

    @Field(() => [BackupJobConfig])
    configs!: BackupJobConfig[];
}

@InputType()
export class InitiateBackupInput {
    @Field(() => String, { description: 'The name of the remote configuration to use for the backup.' })
    @IsString()
    @IsNotEmpty()
    remoteName!: string;

    @Field(() => String, { description: 'Source path to backup.' })
    @IsString()
    @IsNotEmpty()
    sourcePath!: string;

    @Field(() => String, { description: 'Destination path on the remote.' })
    @IsString()
    @IsNotEmpty()
    destinationPath!: string;

    @Field(() => GraphQLJSON, {
        description: 'Additional options for the backup operation, such as --dry-run or --transfers.',
        nullable: true,
    })
    @IsOptional()
    @IsObject()
    options?: Record<string, unknown>;
}

@ObjectType()
export class BackupStatus {
    @Field(() => String, {
        description: 'Status message indicating the outcome of the backup initiation.',
    })
    status!: string;

    @Field(() => String, {
        description: 'Job ID if available, can be used to check job status.',
        nullable: true,
    })
    jobId?: string;
}

@ObjectType()
export class RCloneWebGuiInfo {
    @Field()
    url!: string;
}

@ObjectType({
    implements: () => Node,
})
export class BackupJobConfig extends Node {
    @Field(() => String, { description: 'Human-readable name for this backup job' })
    name!: string;

    @Field(() => SourceType, { description: 'Type of the backup source' })
    sourceType!: SourceType;

    @Field(() => DestinationType, { description: 'Type of the backup destination' })
    destinationType!: DestinationType;

    @Field(() => String, {
        description: 'Cron schedule expression (e.g., "0 2 * * *" for daily at 2AM)',
    })
    schedule!: string;

    @Field(() => Boolean, { description: 'Whether this backup job is enabled' })
    enabled!: boolean;

    @Field(() => SourceConfigUnion, { description: 'Source configuration for this backup job' })
    sourceConfig!: typeof SourceConfigUnion;

    @Field(() => DestinationConfigUnion, {
        description: 'Destination configuration for this backup job',
    })
    destinationConfig!: typeof DestinationConfigUnion;

    @Field(() => DateTimeISOResolver, { description: 'When this config was created' })
    createdAt!: string;

    @Field(() => DateTimeISOResolver, { description: 'When this config was last updated' })
    updatedAt!: string;

    @Field(() => DateTimeISOResolver, { description: 'Last time this job ran', nullable: true })
    lastRunAt?: string;

    @Field(() => String, { description: 'Status of last run', nullable: true })
    lastRunStatus?: string;

    @Field(() => String, { description: 'Current running job ID for this config', nullable: true })
    currentJobId?: string;
}

@InputType()
export class BaseBackupJobConfigInput {
    @Field(() => String, { nullable: true })
    @IsOptional()
    @IsString()
    @IsNotEmpty()
    name?: string;

    @Field(() => String, { nullable: true })
    @IsOptional()
    @IsString()
    @ValidateIf((o) => o.schedule && o.schedule.length > 0)
    @Matches(
        /^(\*|[0-5]?\d)(\s+(\*|[0-1]?\d|2[0-3]))(\s+(\*|[1-2]?\d|3[0-1]))(\s+(\*|[1-9]|1[0-2]))(\s+(\*|[0-6]))$/,
        {
            message: 'schedule must be a valid cron expression',
        }
    )
    schedule?: string;

    @Field(() => Boolean, { nullable: true })
    @IsOptional()
    @IsBoolean()
    enabled?: boolean;

    @Field(() => SourceConfigInputUnion, {
        description: 'Source configuration for this backup job',
        nullable: true,
    })
    @IsOptional()
    @ValidateNested()
    sourceConfig?: SourceConfigInput;

    @Field(() => DestinationConfigInputUnion, {
        description: 'Destination configuration for this backup job',
        nullable: true,
    })
    @IsOptional()
    @ValidateNested()
    destinationConfig?: DestinationConfigInput;
}

@InputType()
export class CreateBackupJobConfigInput extends BaseBackupJobConfigInput {
    @Field(() => String)
    @IsString()
    @IsNotEmpty()
    declare name: string;

    @Field(() => Boolean, { defaultValue: true })
    @IsBoolean()
    declare enabled: boolean;
}

@InputType()
export class UpdateBackupJobConfigInput extends BaseBackupJobConfigInput {
    @Field(() => String, { nullable: true })
    @IsOptional()
    @IsString()
    lastRunStatus?: string;

    @Field(() => String, { nullable: true })
    @IsOptional()
    @IsString()
    lastRunAt?: string;

    @Field(() => String, { nullable: true })
    @IsOptional()
    @IsString()
    currentJobId?: string;
}

@ObjectType()
export class BackupJobConfigForm {
    @Field(() => PrefixedID)
    id!: string;

    @Field(() => GraphQLJSON)
    dataSchema!: { properties: DataSlice; type: 'object' };

    @Field(() => GraphQLJSON)
    uiSchema!: Layout;
}

@InputType()
export class BackupJobConfigFormInput {
    @Field(() => Boolean, { defaultValue: false })
    @IsOptional()
    @IsBoolean()
    showAdvanced?: boolean;
}
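For reference, an input object that satisfies the validators above. Note the `@Matches` regex only accepts plain five-field cron expressions (no ranges, steps, or lists), so '0 2 * * *' passes while '*/15 * * * *' would not. All values below are illustrative; the nested config shapes are assumptions based on the source/destination input unions referenced in this file.

// Hedged sketch; nested shapes come from the source/destination input unions.
const input: CreateBackupJobConfigInput = Object.assign(new CreateBackupJobConfigInput(), {
    name: 'Nightly flash backup',
    schedule: '0 2 * * *', // minute hour day-of-month month day-of-week
    enabled: true,
    sourceConfig: { type: SourceType.FLASH, flashConfig: { flashPath: '/boot' } },
    destinationConfig: {
        type: DestinationType.RCLONE,
        rcloneConfig: { remoteName: 'gdrive', destinationPath: '/unraid-backups' },
    },
});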
api/src/unraid-api/graph/resolvers/backup/backup.module.ts (new file)
@@ -0,0 +1,30 @@
import { forwardRef, Module } from '@nestjs/common';
import { ScheduleModule } from '@nestjs/schedule';

import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
import { BackupMutationsResolver } from '@app/unraid-api/graph/resolvers/backup/backup-mutations.resolver.js';
import {
    BackupJobConfigResolver,
    BackupResolver,
} from '@app/unraid-api/graph/resolvers/backup/backup.resolver.js';
import { BackupDestinationModule } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.module.js';
import { BackupJobStatusResolver } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.resolver.js';
import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js';
import { BackupSourceModule } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.module.js';
import { RCloneModule } from '@app/unraid-api/graph/resolvers/rclone/rclone.module.js';

@Module({
    imports: [RCloneModule, ScheduleModule.forRoot(), BackupSourceModule, BackupDestinationModule],
    providers: [
        BackupResolver,
        BackupJobConfigResolver,
        BackupMutationsResolver,
        BackupConfigService,
        BackupOrchestrationService,
        BackupJobTrackingService,
        BackupJobStatusResolver,
    ],
    exports: [forwardRef(() => BackupOrchestrationService), BackupJobTrackingService],
})
export class BackupModule {}
131
api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts
Normal file
131
api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts
Normal file
@@ -0,0 +1,131 @@
|
||||
import { Logger } from '@nestjs/common';
|
||||
import { Args, Parent, Query, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
|
||||
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
|
||||
|
||||
import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
|
||||
import {
|
||||
Backup,
|
||||
BackupJobConfig,
|
||||
BackupJobConfigForm,
|
||||
BackupJobConfigFormInput,
|
||||
BackupStatus,
|
||||
} from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
import { buildBackupJobConfigSchema } from '@app/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.js';
import { JobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';
import { FormatService } from '@app/unraid-api/utils/format.service.js';

@Resolver(() => Backup)
export class BackupResolver {
    private readonly logger = new Logger(BackupResolver.name);

    constructor(
        private readonly rcloneService: RCloneService,
        private readonly backupConfigService: BackupConfigService,
        private readonly formatService: FormatService,
        private readonly backupJobTrackingService: BackupJobTrackingService
    ) {}

    @Query(() => Backup, {
        description: 'Get backup service information',
    })
    async backup(): Promise<Backup> {
        return {
            id: 'backup',
            jobs: [],
            configs: [],
        };
    }

    @ResolveField(() => [JobStatus], {
        description: 'Get all running backup jobs',
    })
    async jobs(): Promise<JobStatus[]> {
        return this.backupJobTrackingService.getAllJobStatuses();
    }

    @ResolveField(() => [BackupJobConfig], {
        description: 'Get all backup job configurations',
    })
    async configs(): Promise<BackupJobConfig[]> {
        return this.backupConfigService.getAllBackupJobConfigs();
    }

    @Query(() => BackupJobConfig, {
        description: 'Get a specific backup job configuration',
        nullable: true,
    })
    async backupJobConfig(
        @Args('id', { type: () => PrefixedID }) id: string
    ): Promise<BackupJobConfig | null> {
        return this.backupConfigService.getBackupJobConfig(id);
    }

    @Query(() => JobStatus, {
        description: 'Get status of a specific backup job',
        nullable: true,
    })
    async backupJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<JobStatus | null> {
        return this.backupJobTrackingService.getJobStatus(id) || null;
    }

    @ResolveField(() => BackupStatus, {
        description: 'Get the status for the backup service',
    })
    async status(): Promise<BackupStatus> {
        return {
            status: 'Available',
            jobId: undefined,
        };
    }

    @Query(() => BackupJobConfigForm, {
        description: 'Get the JSON schema for backup job configuration form',
    })
    async backupJobConfigForm(
        @Args('input', { nullable: true }) input?: BackupJobConfigFormInput
    ): Promise<BackupJobConfigForm> {
        const remotes = await this.rcloneService.getRemoteDetails();

        const { dataSchema, uiSchema } = buildBackupJobConfigSchema({
            remotes,
        });

        return {
            id: 'backup-job-config-form',
            dataSchema,
            uiSchema,
        };
    }
}

@Resolver(() => BackupJobConfig)
export class BackupJobConfigResolver {
    private readonly logger = new Logger(BackupJobConfigResolver.name);

    constructor(private readonly backupJobTrackingService: BackupJobTrackingService) {}

    @ResolveField(() => JobStatus, {
        description: 'Get the current running job for this backup config',
        nullable: true,
    })
    async currentJob(@Parent() config: BackupJobConfig): Promise<JobStatus | null> {
        if (!config.currentJobId) {
            return null;
        }

        this.logger.debug(
            `Looking for current job for config ${config.id} using currentJobId: ${config.currentJobId}`
        );

        const jobStatus = this.backupJobTrackingService.getJobStatus(config.currentJobId);
        if (!jobStatus) {
            this.logger.debug(`No job status found for job ID: ${config.currentJobId}`);
            return null;
        }

        return jobStatus as JobStatus;
    }
}
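// Illustrative query sketch: a client could exercise the resolvers above
// roughly like this. Field selections are inferred from the models in this
// changeset and may not match the final schema exactly.
//
//   query {
//     backup {
//       id
//       status { status jobId }
//       jobs { id name status progress }
//       configs { id name }
//     }
//   }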
32 api/src/unraid-api/graph/resolvers/backup/backup.utils.ts Normal file
@@ -0,0 +1,32 @@
export const BACKUP_JOB_GROUP_PREFIX = 'backup-';

/**
 * Generates the group ID for a backup job based on its configuration ID.
 * This group ID is used by RClone to group related backup operations.
 * @param configId The ID of the backup job configuration.
 * @returns The RClone group ID string.
 */
export function getBackupJobGroupId(configId: string): string {
    return `${BACKUP_JOB_GROUP_PREFIX}${configId}`;
}

/**
 * Extracts the configuration ID from a backup job group ID.
 * @param groupId The RClone group ID string (e.g., "backup-someConfigId").
 * @returns The configuration ID if the group ID is valid and prefixed, otherwise undefined.
 */
export function getConfigIdFromGroupId(groupId: string): string | undefined {
    if (groupId.startsWith(BACKUP_JOB_GROUP_PREFIX)) {
        return groupId.substring(BACKUP_JOB_GROUP_PREFIX.length);
    }
    return undefined;
}

/**
 * Checks if the given ID corresponds to a backup job group.
 * @param id The ID string to check (can be a job ID or a group ID).
 * @returns True if the ID represents a backup job group, false otherwise.
 */
export function isBackupJobGroup(id: string): boolean {
    return id.startsWith(BACKUP_JOB_GROUP_PREFIX);
}
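// Usage sketch (illustrative config ID): the helpers above round-trip between
// config IDs and RClone group IDs.
//
//   const groupId = getBackupJobGroupId('abc123'); // 'backup-abc123'
//   isBackupJobGroup(groupId);                     // true
//   getConfigIdFromGroupId(groupId);               // 'abc123'
//   getConfigIdFromGroupId('other-id');            // undefined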
@@ -0,0 +1,180 @@
import type { LabelElement, SchemaBasedCondition } from '@jsonforms/core';
import { JsonSchema7, RuleEffect } from '@jsonforms/core';

import type { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
import type { SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js';
import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js';

export function getDestinationConfigSlice({ remotes = [] }: { remotes?: RCloneRemote[] }): SettingSlice {
    const destinationConfigElements: UIElement[] = [
        {
            type: 'Control',
            scope: '#/properties/destinationConfig/properties/type',
            options: {
                format: 'radio',
                radioLayout: 'horizontal',
                options: [
                    {
                        label: 'RClone Remote',
                        value: DestinationType.RCLONE,
                        description: 'Backup to cloud storage via RClone',
                    },
                ],
            },
        },

        // RClone Configuration
        {
            type: 'VerticalLayout',
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/destinationConfig/properties/type',
                    schema: { const: DestinationType.RCLONE },
                } as SchemaBasedCondition,
            },
            elements: [
                {
                    type: 'Label',
                    text: 'RClone Configuration',
                    options: {
                        description: 'Configure RClone remote destination settings.',
                    },
                } as LabelElement,

                createLabeledControl({
                    scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/remoteName',
                    label: 'Remote Configuration',
                    description: 'Select the RClone remote configuration to use for this backup',
                    controlOptions: {
                        suggestions: remotes.map((remote) => ({
                            value: remote.name,
                            label: `${remote.name} (${remote.type})`,
                        })),
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/destinationPath',
                    label: 'Destination Path',
                    description:
                        'The path on the remote where files will be stored (e.g., backups/documents)',
                    controlOptions: {
                        placeholder: 'backups/',
                        format: 'string',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/rcloneOptions/properties/transfers',
                    label: 'Number of Transfers',
                    description: 'Number of file transfers to run in parallel (default: 4)',
                    controlOptions: {
                        placeholder: '4',
                        format: 'number',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/rcloneOptions/properties/checkers',
                    label: 'Number of Checkers',
                    description: 'Number of checkers to run in parallel (default: 8)',
                    controlOptions: {
                        placeholder: '8',
                        format: 'number',
                    },
                }),
            ],
        },
    ];

    const destinationConfigProperties: Record<string, JsonSchema7> = {
        destinationConfig: {
            type: 'object',
            title: 'Destination Configuration',
            description: 'Configuration for backup destination',
            properties: {
                type: {
                    type: 'string',
                    title: 'Destination Type',
                    description: 'Type of destination to use for backup',
                    enum: [DestinationType.RCLONE],
                    default: DestinationType.RCLONE,
                },
                rcloneConfig: {
                    type: 'object',
                    title: 'RClone Configuration',
                    properties: {
                        remoteName: {
                            type: 'string',
                            title: 'Remote Name',
                            description: 'Remote name from rclone config',
                            enum:
                                remotes.length > 0
                                    ? remotes.map((remote) => remote.name)
                                    : ['No remotes configured'],
                        },
                        destinationPath: {
                            type: 'string',
                            title: 'Destination Path',
                            description: 'Destination path on the remote',
                            minLength: 1,
                        },
                        rcloneOptions: {
                            type: 'object',
                            title: 'RClone Options',
                            description: 'Advanced RClone configuration options',
                            properties: {
                                transfers: {
                                    type: 'integer',
                                    title: 'Transfers',
                                    description: 'Number of file transfers to run in parallel',
                                    minimum: 1,
                                    maximum: 100,
                                    default: 4,
                                },
                                checkers: {
                                    type: 'integer',
                                    title: 'Checkers',
                                    description: 'Number of checkers to run in parallel',
                                    minimum: 1,
                                    maximum: 100,
                                    default: 8,
                                },
                            },
                        },
                    },
                    required: ['remoteName', 'destinationPath'],
                },
            },
            required: ['type'],
        },
    };

    // Apply conditional logic for destinationConfig
    if (
        destinationConfigProperties.destinationConfig &&
        typeof destinationConfigProperties.destinationConfig === 'object'
    ) {
        destinationConfigProperties.destinationConfig.allOf = [
            {
                if: { properties: { type: { const: DestinationType.RCLONE } }, required: ['type'] },
                then: {
                    required: ['rcloneConfig'],
                },
            },
        ];
    }

    const verticalLayoutElement: UIElement = {
        type: 'VerticalLayout',
        elements: destinationConfigElements,
        options: { step: 2 },
    };

    return {
        properties: destinationConfigProperties,
        elements: [verticalLayoutElement],
    };
}
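// Illustrative call (the remote below is a placeholder): the returned slice
// contributes JSON Schema properties and matching UI elements for step 2 of
// the backup form.
//
//   const slice = getDestinationConfigSlice({
//       remotes: [{ name: 'gdrive', type: 'drive' } as RCloneRemote],
//   });
//   // slice.properties -> dataSchema fragment; slice.elements -> uiSchema fragment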
@@ -0,0 +1,59 @@
import { Writable } from 'stream';

import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';

export interface BackupDestinationConfig {
    timeout: number;
    cleanupOnFailure: boolean;
    useStreaming?: boolean;
    supportsStreaming?: boolean;
}

export interface BackupDestinationResult {
    success: boolean;
    destinationPath?: string;
    uploadedBytes?: number;
    error?: string;
    cleanupRequired?: boolean;
    metadata?: Record<string, unknown>;
}

export interface StreamingDestinationHandle {
    stream: Writable;
    completionPromise: Promise<BackupDestinationResult>;
}

export interface BackupDestinationProcessorOptions {
    jobId?: string;
    onProgress?: (progress: number) => void;
    onOutput?: (data: string) => void;
    onError?: (error: string) => void;
}

export abstract class BackupDestinationProcessor<TConfig extends BackupDestinationConfig> {
    abstract readonly destinationType: DestinationType;

    abstract execute(
        sourcePath: string,
        config: TConfig,
        options?: BackupDestinationProcessorOptions
    ): Promise<BackupDestinationResult>;

    abstract validate(config: TConfig): Promise<{ valid: boolean; error?: string; warnings?: string[] }>;

    abstract cleanup(result: BackupDestinationResult): Promise<void>;

    // Getter to check if processor supports streaming
    abstract get supportsStreaming(): boolean;

    // Optional getter to get a writable stream for streaming backups
    get getWritableStream():
        | ((
              config: TConfig,
              jobId: string,
              options?: BackupDestinationProcessorOptions
          ) => Promise<StreamingDestinationHandle>)
        | undefined {
        return undefined;
    }
}
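// Minimal sketch of a concrete processor (hypothetical, not part of this
// changeset) to show the contract the abstract class imposes:
//
//   class NoopDestinationProcessor extends BackupDestinationProcessor<BackupDestinationConfig> {
//       readonly destinationType = DestinationType.RCLONE; // placeholder value
//
//       async execute(sourcePath: string): Promise<BackupDestinationResult> {
//           return { success: true, destinationPath: sourcePath };
//       }
//
//       async validate(): Promise<{ valid: boolean }> {
//           return { valid: true };
//       }
//
//       async cleanup(): Promise<void> {}
//
//       get supportsStreaming(): boolean {
//           return false; // orchestration falls back to the non-streaming path
//       }
//   }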
@@ -0,0 +1,13 @@
import { forwardRef, Module } from '@nestjs/common';

import { BackupDestinationService } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.service.js';
import { RCloneDestinationProcessor } from '@app/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.js';
import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';
import { RCloneModule } from '@app/unraid-api/graph/resolvers/rclone/rclone.module.js';

@Module({
    imports: [forwardRef(() => RCloneModule)],
    providers: [RCloneApiService, BackupDestinationService, RCloneDestinationProcessor],
    exports: [BackupDestinationService, RCloneDestinationProcessor],
})
export class BackupDestinationModule {}
@@ -0,0 +1,85 @@
import { BadRequestException, Injectable, Logger } from '@nestjs/common';
import { EventEmitter } from 'events';

import { v4 as uuidv4 } from 'uuid';

import {
    BackupDestinationConfig,
    BackupDestinationProcessor,
    BackupDestinationProcessorOptions,
    BackupDestinationResult,
} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
import { RCloneDestinationProcessor } from '@app/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.js';

export interface BackupDestinationOptions {
    jobId?: string;
    onProgress?: (progress: number) => void;
    onOutput?: (data: string) => void;
    onError?: (error: string) => void;
}

@Injectable()
export class BackupDestinationService extends EventEmitter {
    private readonly logger = new Logger(BackupDestinationService.name);

    constructor(private readonly rcloneDestinationProcessor: RCloneDestinationProcessor) {
        super();
    }

    async processDestination<T extends BackupDestinationConfig & { type: DestinationType }>(
        sourcePath: string,
        config: T,
        options?: BackupDestinationOptions
    ): Promise<BackupDestinationResult> {
        const processor = this.getProcessor(config.type);
        if (!processor) {
            throw new BadRequestException(`Unsupported destination type: ${config.type}`);
        }

        const processorOptions: BackupDestinationProcessorOptions = {
            jobId: options?.jobId || uuidv4(),
            onProgress: options?.onProgress,
            onOutput: options?.onOutput,
            onError: options?.onError,
        };

        try {
            const result = await processor.execute(sourcePath, config, processorOptions);
            this.logger.log(`Destination processing completed for type: ${config.type}`);
            return result;
        } catch (error) {
            this.logger.error(`Destination processing failed for type: ${config.type}`, error);
            throw error;
        }
    }

    async cancelDestinationJob(jobId: string): Promise<boolean> {
        this.logger.log(`Attempting to cancel destination job: ${jobId}`);

        try {
            const result = await this.rcloneDestinationProcessor.execute('', {} as any, { jobId });
            if (result.metadata?.jobId) {
                this.logger.log(`Cancelled destination job: ${jobId}`);
                return true;
            }
        } catch (error) {
            this.logger.warn(`Failed to cancel destination job ${jobId}:`, error);
        }

        return false;
    }

    async cleanup(): Promise<void> {
        this.logger.log('Cleaning up destination service...');
    }

    public getProcessor(type: DestinationType): BackupDestinationProcessor<any> | null {
        switch (type) {
            case DestinationType.RCLONE:
                return this.rcloneDestinationProcessor;
            default:
                return null;
        }
    }
}
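// Illustrative usage (config values are placeholders; extra RClone-specific
// fields pass through to the matching processor):
//
//   const result = await backupDestinationService.processDestination('/mnt/user/appdata', {
//       type: DestinationType.RCLONE,
//       remoteName: 'gdrive',
//       destinationPath: 'backups/appdata',
//       timeout: 3600000,
//       cleanupOnFailure: true,
//   });
//   if (!result.success) console.error(result.error);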
@@ -0,0 +1,95 @@
import { createUnionType, Field, InputType, ObjectType, registerEnumType } from '@nestjs/graphql';

import { Type } from 'class-transformer';
import { IsEnum, IsNotEmpty, IsObject, IsOptional, IsString, ValidateNested } from 'class-validator';
import { GraphQLJSON } from 'graphql-scalars';

import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';

export enum DestinationType {
    RCLONE = 'RCLONE',
}

registerEnumType(DestinationType, {
    name: 'DestinationType',
});

export interface StreamingJobInfo {
    jobId: string;
    status: BackupJobStatus;
    progress?: number;
    startTime: Date;
    endTime?: Date;
    error?: string;
}

@ObjectType()
export class RcloneDestinationConfig {
    @Field(() => String)
    type!: 'RCLONE';

    @Field(() => String, { description: 'Remote name from rclone config' })
    remoteName!: string;

    @Field(() => String, { description: 'Destination path on the remote' })
    destinationPath!: string;

    @Field(() => GraphQLJSON, {
        description: 'RClone options (e.g., --transfers, --checkers)',
        nullable: true,
    })
    rcloneOptions?: Record<string, unknown>;

    static isTypeOf(obj: any): obj is RcloneDestinationConfig {
        return (
            obj &&
            obj.type === 'RCLONE' &&
            typeof obj.remoteName === 'string' &&
            typeof obj.destinationPath === 'string'
        );
    }
}

@InputType()
export class RcloneDestinationConfigInput {
    @Field(() => String)
    @IsString()
    @IsNotEmpty()
    remoteName!: string;

    @Field(() => String)
    @IsString()
    @IsNotEmpty()
    destinationPath!: string;

    @Field(() => GraphQLJSON, { nullable: true })
    @IsOptional()
    @IsObject()
    rcloneOptions?: Record<string, unknown>;
}

@InputType()
export class DestinationConfigInput {
    @Field(() => DestinationType, { nullable: false })
    @IsEnum(DestinationType, { message: 'Invalid destination type' })
    type!: DestinationType;

    @Field(() => RcloneDestinationConfigInput, { nullable: true })
    @IsOptional()
    @ValidateNested()
    @Type(() => RcloneDestinationConfigInput)
    rcloneConfig?: RcloneDestinationConfigInput;
}

export const DestinationConfigUnion = createUnionType({
    name: 'DestinationConfigUnion',
    types: () => [RcloneDestinationConfig] as const,
    resolveType(obj: any) {
        if (RcloneDestinationConfig.isTypeOf && RcloneDestinationConfig.isTypeOf(obj)) {
            return RcloneDestinationConfig;
        }
        return null;
    },
});

export const DestinationConfigInputUnion = DestinationConfigInput;
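// Sketch of how the union resolves (values are illustrative): isTypeOf acts
// as the discriminator for resolveType above.
//
//   RcloneDestinationConfig.isTypeOf({
//       type: 'RCLONE',
//       remoteName: 'gdrive',
//       destinationPath: 'backups/',
//   }); // true -> union resolves to RcloneDestinationConfig
//   RcloneDestinationConfig.isTypeOf({ type: 'S3' }); // false -> resolveType returns null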
@@ -0,0 +1,357 @@
import { Injectable, Logger } from '@nestjs/common';

import { execa } from 'execa';

import { getBackupJobGroupId } from '@app/unraid-api/graph/resolvers/backup/backup.utils.js';
import {
    BackupDestinationConfig,
    BackupDestinationProcessor,
    BackupDestinationProcessorOptions,
    BackupDestinationResult,
    StreamingDestinationHandle,
} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';

export interface RCloneDestinationConfig extends BackupDestinationConfig {
    remoteName: string;
    destinationPath: string;
    transferOptions?: Record<string, unknown>;
    useStreaming?: boolean;
    sourceCommand?: string;
    sourceArgs?: string[];
    sourceType?: SourceType;
}

@Injectable()
export class RCloneDestinationProcessor extends BackupDestinationProcessor<RCloneDestinationConfig> {
    readonly destinationType = DestinationType.RCLONE;
    private readonly logger = new Logger(RCloneDestinationProcessor.name);

    constructor(private readonly rcloneApiService: RCloneApiService) {
        super();
    }

    async execute(
        sourcePath: string,
        config: RCloneDestinationConfig,
        options: BackupDestinationProcessorOptions = {}
    ): Promise<BackupDestinationResult> {
        const { jobId = 'unknown', onProgress, onOutput, onError } = options;

        try {
            this.logger.log(
                `Starting RClone upload job ${jobId} from ${sourcePath} to ${config.remoteName}:${config.destinationPath}`
            );

            return await this.executeRegularBackup(sourcePath, config, options);
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : 'Unknown RClone error';
            this.logger.error(`RClone upload job ${jobId} failed: ${errorMessage}`, error);

            if (onError) {
                onError(errorMessage);
            }

            return {
                success: false,
                error: errorMessage,
                cleanupRequired: config.cleanupOnFailure,
            };
        }
    }

    private async executeRegularBackup(
        sourcePath: string,
        config: RCloneDestinationConfig,
        options: BackupDestinationProcessorOptions
    ): Promise<BackupDestinationResult> {
        const { jobId: backupConfigId, onOutput, onProgress, onError } = options;

        if (!backupConfigId) {
            const errorMsg = 'Backup Configuration ID (jobId) is required to start RClone backup.';
            this.logger.error(errorMsg);
            if (onError) {
                onError(errorMsg);
            }
            return {
                success: false,
                error: errorMsg,
                cleanupRequired: config.cleanupOnFailure,
            };
        }

        await this.rcloneApiService.startBackup({
            srcPath: sourcePath,
            dstPath: `${config.remoteName}:${config.destinationPath}`,
            async: true,
            configId: backupConfigId,
            options: config.transferOptions,
        });

        const groupIdToMonitor = getBackupJobGroupId(backupConfigId);

        if (onOutput) {
            onOutput(
                `RClone backup process initiated for group: ${groupIdToMonitor}. Monitoring progress...`
            );
        }

        let jobStatus = await this.rcloneApiService.getEnhancedJobStatus(
            groupIdToMonitor,
            backupConfigId
        );
        this.logger.debug('Rclone Job Status: %o', jobStatus);
        let retries = 0;
        const effectiveTimeout = config.timeout && config.timeout >= 60000 ? config.timeout : 3600000;
        const maxRetries = Math.floor(effectiveTimeout / 5000);

        while (jobStatus && !jobStatus.finished && retries < maxRetries) {
            await new Promise((resolve) => setTimeout(resolve, 5000));

            try {
                jobStatus = await this.rcloneApiService.getEnhancedJobStatus(
                    groupIdToMonitor,
                    backupConfigId
                );
                if (jobStatus && onProgress && jobStatus.progressPercentage !== undefined) {
                    onProgress(jobStatus.progressPercentage);
                }
                if (jobStatus && onOutput && jobStatus.stats?.speed) {
                    onOutput(`Group ${groupIdToMonitor} - Transfer speed: ${jobStatus.stats.speed} B/s`);
                }
            } catch (pollError: any) {
                this.logger.warn(
                    `[${backupConfigId}] Error polling group status for ${groupIdToMonitor}: ${(pollError as Error).message}`
                );
            }
            retries++;
        }

        if (!jobStatus) {
            const errorMsg = `Failed to get final job status for RClone group ${groupIdToMonitor}`;
            this.logger.error(`[${backupConfigId}] ${errorMsg}`);
            if (onError) {
                onError(errorMsg);
            }
            return {
                success: false,
                error: errorMsg,
                destinationPath: `${config.remoteName}:${config.destinationPath}`,
                cleanupRequired: config.cleanupOnFailure,
            };
        }

        if (jobStatus.finished && jobStatus.success) {
            if (onProgress) {
                onProgress(100);
            }
            if (onOutput) {
                onOutput(`RClone backup for group ${groupIdToMonitor} completed successfully.`);
            }
            return {
                success: true,
                destinationPath: `${config.remoteName}:${config.destinationPath}`,
                metadata: {
                    groupId: groupIdToMonitor,
                    remoteName: config.remoteName,
                    remotePath: config.destinationPath,
                    transferOptions: config.transferOptions,
                    stats: jobStatus.stats,
                },
            };
        } else {
            let errorMsg: string;
            if (!jobStatus.finished && retries >= maxRetries) {
                errorMsg = `RClone group ${groupIdToMonitor} timed out after ${effectiveTimeout / 1000} seconds.`;
                this.logger.error(`[${backupConfigId}] ${errorMsg}`);
            } else {
                errorMsg = jobStatus.error || `RClone group ${groupIdToMonitor} failed.`;
                this.logger.error(`[${backupConfigId}] ${errorMsg}`, jobStatus.stats?.lastError);
            }

            if (onError) {
                onError(errorMsg);
            }
            return {
                success: false,
                error: errorMsg,
                destinationPath: `${config.remoteName}:${config.destinationPath}`,
                metadata: {
                    groupId: groupIdToMonitor,
                    remoteName: config.remoteName,
                    remotePath: config.destinationPath,
                    transferOptions: config.transferOptions,
                    stats: jobStatus.stats,
                },
                cleanupRequired: config.cleanupOnFailure,
            };
        }
    }

    async validate(
        config: RCloneDestinationConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        const warnings: string[] = [];

        if (!config.remoteName) {
            return { valid: false, error: 'Remote name is required' };
        }

        if (!config.destinationPath) {
            return { valid: false, error: 'Remote path is required' };
        }

        if (config.useStreaming) {
            if (!config.sourceCommand) {
                return { valid: false, error: 'Source command is required for streaming backups' };
            }
            if (!config.sourceArgs || config.sourceArgs.length === 0) {
                return { valid: false, error: 'Source arguments are required for streaming backups' };
            }
        }

        try {
            const remotes = await this.rcloneApiService.listRemotes();
            if (!remotes.includes(config.remoteName)) {
                return { valid: false, error: `Remote '${config.remoteName}' not found` };
            }
        } catch (error) {
            return { valid: false, error: 'Failed to validate remote configuration' };
        }

        if (config.timeout < 60000) {
            warnings.push('Timeout is less than 1 minute, which may be too short for large uploads');
        }

        return { valid: true, warnings };
    }

    async cleanup(result: BackupDestinationResult): Promise<void> {
        if (!result.cleanupRequired || !result.destinationPath) {
            return;
        }

        const idToStop = result.metadata?.groupId || result.metadata?.jobId;

        try {
            this.logger.log(`Cleaning up failed upload at ${result.destinationPath}`);

            if (idToStop) {
                await this.rcloneApiService.stopJob(idToStop as string);
                if (result.metadata?.groupId) {
                    this.logger.log(`Stopped RClone group: ${result.metadata.groupId}`);
                } else if (result.metadata?.jobId) {
                    this.logger.log(
                        `Attempted to stop RClone job: ${result.metadata.jobId} (Note: Group ID preferred for cleanup)`
                    );
                }
            }
        } catch (error) {
            this.logger.warn(
                `Failed to cleanup destination: ${error instanceof Error ? error.message : 'Unknown error'}`
            );
        }
    }

    get supportsStreaming(): boolean {
        return true;
    }

    get getWritableStream(): (
        config: RCloneDestinationConfig,
        jobId: string,
        options?: BackupDestinationProcessorOptions
    ) => Promise<StreamingDestinationHandle> {
        return async (
            config: RCloneDestinationConfig,
            jobId: string,
            options: BackupDestinationProcessorOptions = {}
        ): Promise<StreamingDestinationHandle> => {
            const validation = await this.validate(config);
            if (!validation.valid) {
                const errorMsg = `RClone destination configuration validation failed: ${validation.error}`;
                this.logger.error(`[${jobId}] ${errorMsg}`);
                throw new Error(errorMsg);
            }

            const rcloneDest = `${config.remoteName}:${config.destinationPath}`;
            const rcloneArgs = ['rcat', rcloneDest, '--progress'];

            this.logger.log(
                `[${jobId}] Preparing writable stream for rclone rcat to ${rcloneDest} with progress`
            );

            try {
                const rcloneProcess = execa('rclone', rcloneArgs, {});

                const completionPromise = new Promise<BackupDestinationResult>((resolve, reject) => {
                    let stderrOutput = '';
                    let stdoutOutput = '';

                    rcloneProcess.stderr?.on('data', (data) => {
                        const chunk = data.toString();
                        stderrOutput += chunk;
                        this.logger.verbose(`[${jobId}] rclone rcat stderr: ${chunk.trim()}`);

                        const progressMatch = chunk.match(/(\d+)%/);
                        if (progressMatch && progressMatch[1] && options.onProgress) {
                            const percentage = parseInt(progressMatch[1], 10);
                            if (!isNaN(percentage)) {
                                options.onProgress(percentage);
                            }
                        }
                    });

                    rcloneProcess.stdout?.on('data', (data) => {
                        const chunk = data.toString();
                        stdoutOutput += chunk;
                        this.logger.verbose(`[${jobId}] rclone rcat stdout: ${chunk.trim()}`);
                    });

                    rcloneProcess
                        .then((result) => {
                            this.logger.log(
                                `[${jobId}] rclone rcat to ${rcloneDest} completed successfully.`
                            );
                            resolve({
                                success: true,
                                destinationPath: rcloneDest,
                                metadata: { stdout: stdoutOutput, stderr: stderrOutput },
                            });
                        })
                        .catch((error) => {
                            const errorMessage =
                                error.stderr || error.message || 'rclone rcat command failed';
                            this.logger.error(
                                `[${jobId}] rclone rcat to ${rcloneDest} failed: ${errorMessage}`,
                                error.stack
                            );
                            reject({
                                success: false,
                                error: errorMessage,
                                destinationPath: rcloneDest,
                                metadata: { stdout: stdoutOutput, stderr: stderrOutput },
                            });
                        });
                });

                if (!rcloneProcess.stdin) {
                    const errMsg = 'Failed to get stdin stream from rclone process.';
                    this.logger.error(`[${jobId}] ${errMsg}`);
                    throw new Error(errMsg);
                }

                return {
                    stream: rcloneProcess.stdin,
                    completionPromise,
                };
            } catch (error) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                this.logger.error(`[${jobId}] Failed to start rclone rcat process: ${errorMessage}`);
                throw new Error(`Failed to start rclone rcat process: ${errorMessage}`);
            }
        };
    }
}
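// Streaming usage sketch (illustrative; `someReadable` is a placeholder):
// pipe any Readable into the handle's stream, then await the completion
// promise, which settles when the rclone rcat process exits.
//
//   const handle = await processor.getWritableStream(config, 'job-1', {
//       onProgress: (p) => console.log(`${p}%`),
//   });
//   someReadable.pipe(handle.stream);
//   const result = await handle.completionPromise;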
@@ -0,0 +1,189 @@
import type { LabelElement, Layout, SchemaBasedCondition } from '@jsonforms/core';
import { JsonSchema7, RuleEffect } from '@jsonforms/core';

import type { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
import type { DataSlice, SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js';
import { getDestinationConfigSlice } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-jsonforms.config.js';
import { getSourceConfigSlice } from '@app/unraid-api/graph/resolvers/backup/source/backup-source-jsonforms.config.js';
import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js';
import { mergeSettingSlices } from '@app/unraid-api/types/json-forms.js';

function getBasicBackupConfigSlice(): SettingSlice {
    const basicConfigElements: UIElement[] = [
        createLabeledControl({
            scope: '#/properties/name',
            label: 'Backup Job Name',
            description: 'A descriptive name for this backup job (e.g., "Weekly Documents Backup")',
            controlOptions: {
                placeholder: 'Enter backup job name',
                format: 'string',
            },
        }),

        createLabeledControl({
            scope: '#/properties/schedule',
            label: 'Schedule (Cron Expression)',
            description:
                'When to run this backup job. Leave empty for manual execution only. Examples: "0 2 * * *" (daily at 2AM), "0 2 * * 0" (weekly on Sunday at 2AM)',
            controlOptions: {
                placeholder: 'Leave empty for manual backup',
                format: 'string',
                suggestions: [
                    {
                        value: '',
                        label: 'Manual Only',
                        tooltip: 'No automatic schedule - run manually only',
                    },
                    {
                        value: '0 2 * * *',
                        label: 'Daily at 2:00 AM',
                        tooltip: 'Runs every day at 2:00 AM',
                    },
                    {
                        value: '0 2 * * 0',
                        label: 'Weekly (Sunday 2:00 AM)',
                        tooltip: 'Runs every Sunday at 2:00 AM',
                    },
                    {
                        value: '0 9 * * 1',
                        label: 'Mondays at 9:00 AM',
                        tooltip: 'Runs every Monday at 9:00 AM',
                    },
                    {
                        value: '0 0 1 * *',
                        label: 'Monthly (1st day at midnight)',
                        tooltip: 'Runs on the 1st day of every month at midnight',
                    },
                    {
                        value: '0 2 1 * *',
                        label: 'Monthly (1st at 2:00 AM)',
                        tooltip: 'Runs on the 1st of every month at 2:00 AM',
                    },
                    {
                        value: '0 2 * * 1-5',
                        label: 'Weekdays at 2:00 AM',
                        tooltip: 'Runs Monday through Friday at 2:00 AM',
                    },
                ],
            },
        }),

        createLabeledControl({
            scope: '#/properties/enabled',
            label: 'Enable Backup Job',
            description: 'Whether this backup job should run automatically according to the schedule',
            controlOptions: {
                toggle: true,
            },
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/schedule',
                    schema: {
                        type: 'string',
                        minLength: 1,
                    },
                } as SchemaBasedCondition,
            },
        }),
    ];

    const basicConfigProperties: Record<string, JsonSchema7> = {
        name: {
            type: 'string',
            title: 'Backup Job Name',
            description: 'Human-readable name for this backup job',
            minLength: 1,
            maxLength: 100,
        },
        schedule: {
            type: 'string',
            title: 'Cron Schedule',
            description: 'Cron schedule expression (empty for manual execution)',
        },
        enabled: {
            type: 'boolean',
            title: 'Enabled',
            description: 'Whether this backup job is enabled',
            default: true,
        },
        configStep: {
            type: 'object',
            properties: {
                current: { type: 'integer', default: 0 },
                total: { type: 'integer', default: 3 },
            },
            default: { current: 0, total: 3 },
        },
    };

    const verticalLayoutElement: UIElement = {
        type: 'VerticalLayout',
        elements: basicConfigElements,
        options: { step: 0 },
    };

    return {
        properties: basicConfigProperties as unknown as DataSlice,
        elements: [verticalLayoutElement],
    };
}

export function buildBackupJobConfigSchema({ remotes = [] }: { remotes?: RCloneRemote[] }): {
    dataSchema: { properties: DataSlice; type: 'object' };
    uiSchema: Layout;
} {
    const slicesToMerge: SettingSlice[] = [];

    const basicSlice = getBasicBackupConfigSlice();
    slicesToMerge.push(basicSlice);

    const sourceSlice = getSourceConfigSlice();
    slicesToMerge.push(sourceSlice);

    const destinationSlice = getDestinationConfigSlice({ remotes });
    slicesToMerge.push(destinationSlice);

    const mergedSlices = mergeSettingSlices(slicesToMerge);

    const dataSchema: { properties: DataSlice; type: 'object' } = {
        type: 'object',
        properties: mergedSlices.properties,
    };

    const steps = [
        { label: 'Backup Configuration', description: 'Basic backup job settings' },
        { label: 'Source Configuration', description: 'Configure what to backup' },
        { label: 'Destination Configuration', description: 'Configure where to backup' },
    ];

    const step0Elements = basicSlice.elements;
    const step1Elements = sourceSlice.elements;
    const step2Elements = destinationSlice.elements;

    const steppedLayoutElement: UIElement = {
        type: 'SteppedLayout',
        options: {
            steps: steps,
        },
        elements: [...(step0Elements || []), ...(step1Elements || []), ...(step2Elements || [])].filter(
            (el) => el
        ) as UIElement[],
    };

    const titleLabel: UIElement = {
        type: 'Label',
        text: 'Create Backup Job',
        options: {
            format: 'title',
            description: 'Configure a new scheduled backup job with RClone.',
        },
    };

    const uiSchema: Layout = {
        type: 'VerticalLayout',
        elements: [titleLabel, steppedLayoutElement],
    };

    return { dataSchema, uiSchema };
}
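// Illustrative standalone call (remote values are placeholders); the resolver
// in this changeset feeds the builder with live remotes from RCloneService:
//
//   const { dataSchema, uiSchema } = buildBackupJobConfigSchema({
//       remotes: [{ name: 'gdrive', type: 'drive' } as RCloneRemote],
//   });
//   // dataSchema drives validation; uiSchema renders the three-step wizard.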
@@ -0,0 +1,76 @@
import { Field, GraphQLISODateTime, Int, ObjectType, registerEnumType } from '@nestjs/graphql';

import { Node } from '@unraid/shared/graphql.model';

// Moved BackupJobStatus enum here
export enum BackupJobStatus {
    QUEUED = 'Queued',
    RUNNING = 'Running',
    COMPLETED = 'Completed',
    FAILED = 'Failed',
    CANCELLED = 'Cancelled',
}

registerEnumType(BackupJobStatus, {
    name: 'BackupJobStatus',
    description: 'Status of a backup job',
});

@ObjectType({
    implements: () => Node,
})
export class JobStatus extends Node {
    @Field(() => String, { description: 'External job ID from the job execution system' })
    externalJobId!: string;

    @Field()
    name!: string;

    @Field(() => BackupJobStatus)
    status!: BackupJobStatus;

    @Field(() => Int, { description: 'Progress percentage (0-100)' })
    progress!: number;

    @Field({ nullable: true })
    message?: string;

    @Field({ nullable: true })
    error?: string;

    @Field(() => GraphQLISODateTime)
    startTime!: Date;

    @Field(() => GraphQLISODateTime, { nullable: true })
    endTime?: Date;

    @Field(() => Int, { nullable: true, description: 'Bytes transferred' })
    bytesTransferred?: number;

    @Field(() => Int, { nullable: true, description: 'Total bytes to transfer' })
    totalBytes?: number;

    @Field(() => Int, { nullable: true, description: 'Transfer speed in bytes per second' })
    speed?: number;

    @Field(() => Int, { nullable: true, description: 'Elapsed time in seconds' })
    elapsedTime?: number;

    @Field(() => Int, { nullable: true, description: 'Estimated time to completion in seconds' })
    eta?: number;

    @Field(() => String, { nullable: true, description: 'Human-readable bytes transferred' })
    formattedBytesTransferred?: string;

    @Field(() => String, { nullable: true, description: 'Human-readable transfer speed' })
    formattedSpeed?: string;

    @Field(() => String, { nullable: true, description: 'Human-readable elapsed time' })
    formattedElapsedTime?: string;

    @Field(() => String, { nullable: true, description: 'Human-readable ETA' })
    formattedEta?: string;
}

// Use JobStatus as the unified type for both GraphQL and TypeScript
export type JobStatusInfo = JobStatus;
@@ -0,0 +1,30 @@
import { Args, Query, Resolver } from '@nestjs/graphql';

import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';

import { JobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';

@Resolver(() => JobStatus)
export class BackupJobStatusResolver {
    constructor(private readonly jobTrackingService: BackupJobTrackingService) {}

    @Query(() => JobStatus, { name: 'backupJobStatus', nullable: true })
    async getJobStatus(
        @Args('jobId', { type: () => PrefixedID }) jobId: string
    ): Promise<JobStatus | null> {
        const jobStatus = this.jobTrackingService.getJobStatus(jobId);
        if (!jobStatus) {
            // Optionally throw NotFoundException or return null based on desired API behavior
            // throw new NotFoundException(`Job with ID ${jobId} not found.`);
            return null;
        }
        return jobStatus as JobStatus; // Map JobStatusInfo to JobStatusGQL if necessary
    }

    @Query(() => [JobStatus], { name: 'allBackupJobStatuses' })
    async getAllJobStatuses(): Promise<JobStatus[]> {
        const statuses = this.jobTrackingService.getAllJobStatuses();
        return statuses as JobStatus[]; // Map JobStatusInfo[] to JobStatusGQL[] if necessary
    }
}
@@ -0,0 +1,119 @@
import { Injectable, Logger } from '@nestjs/common';

import {
    BackupJobStatus,
    JobStatus,
} from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';

@Injectable()
export class BackupJobTrackingService {
    private readonly logger = new Logger(BackupJobTrackingService.name);
    private activeJobs: Map<string, JobStatus> = new Map(); // Maps internal ID -> JobStatus
    private externalJobIndex: Map<string, string> = new Map(); // Maps external ID -> internal ID

    constructor() {
        // Potentially load persisted jobs if needed
    }

    initializeJob(externalJobId: string, jobName: string): JobStatus {
        // Check if external job already exists
        const existingInternalId = this.externalJobIndex.get(externalJobId);
        if (existingInternalId && this.activeJobs.has(existingInternalId)) {
            this.logger.warn(`Job with external ID ${externalJobId} is already initialized.`);
            return this.activeJobs.get(existingInternalId)!;
        }

        const internalId = `job_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
        const newJobStatus: JobStatus = {
            id: internalId,
            externalJobId,
            name: jobName,
            status: BackupJobStatus.QUEUED,
            progress: 0,
            startTime: new Date(),
            message: 'Job initialized.',
        };

        this.activeJobs.set(internalId, newJobStatus);
        this.externalJobIndex.set(externalJobId, internalId);
        this.logger.log(
            `Job initialized: ${jobName} (Internal ID: ${internalId}, External ID: ${externalJobId})`
        );
        return newJobStatus;
    }

    updateJobStatus(
        internalId: string,
        updates: Partial<Omit<JobStatus, 'externalJobId' | 'startTime' | 'name' | 'id'>>
    ): JobStatus | null {
        const job = this.activeJobs.get(internalId);
        if (!job) {
            this.logger.warn(`Cannot update status for unknown internal job ID: ${internalId}`);
            return null;
        }

        const updatedJob = { ...job, ...updates };

        if (
            updates.status === BackupJobStatus.COMPLETED ||
            updates.status === BackupJobStatus.FAILED ||
            updates.status === BackupJobStatus.CANCELLED
        ) {
            updatedJob.endTime = new Date();
            updatedJob.progress = updates.status === BackupJobStatus.COMPLETED ? 100 : job.progress;
        }

        if (updatedJob.progress > 100) {
            updatedJob.progress = 100;
        }

        this.activeJobs.set(internalId, updatedJob);
        this.logger.log(
            `Job status updated for ${job.name} (Internal ID: ${internalId}): Status: ${updatedJob.status}, Progress: ${updatedJob.progress}%`
        );
        return updatedJob;
    }

    // For external systems (like RClone) to update job status
    updateJobStatusByExternalId(
        externalJobId: string,
        updates: Partial<Omit<JobStatus, 'externalJobId' | 'startTime' | 'name' | 'id'>>
    ): JobStatus | null {
        const internalId = this.externalJobIndex.get(externalJobId);
        if (!internalId) {
            this.logger.warn(`Cannot find internal job for external ID: ${externalJobId}`);
            return null;
        }
        return this.updateJobStatus(internalId, updates);
    }

    getJobStatus(internalId: string): JobStatus | undefined {
        return this.activeJobs.get(internalId);
    }

    getJobStatusByExternalId(externalJobId: string): JobStatus | undefined {
        const internalId = this.externalJobIndex.get(externalJobId);
        return internalId ? this.activeJobs.get(internalId) : undefined;
    }

    getAllJobStatuses(): JobStatus[] {
        return Array.from(this.activeJobs.values());
    }

    clearJob(internalId: string): boolean {
        const job = this.activeJobs.get(internalId);
        if (job) {
            this.externalJobIndex.delete(job.externalJobId);
        }
        return this.activeJobs.delete(internalId);
    }

    clearJobByExternalId(externalJobId: string): boolean {
        const internalId = this.externalJobIndex.get(externalJobId);
        if (internalId) {
            this.externalJobIndex.delete(externalJobId);
            return this.activeJobs.delete(internalId);
        }
        return false;
    }
}
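// Lifecycle sketch (illustrative IDs): jobs are keyed by a generated internal
// ID, with the external (config) ID kept in a secondary index.
//
//   const job = trackingService.initializeJob('config-abc', 'Nightly backup');
//   trackingService.updateJobStatusByExternalId('config-abc', {
//       status: BackupJobStatus.RUNNING,
//       progress: 42,
//   });
//   trackingService.getJobStatus(job.id)?.progress; // 42
//   trackingService.clearJobByExternalId('config-abc');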
@@ -0,0 +1,534 @@
|
||||
import { forwardRef, Inject, Injectable, Logger } from '@nestjs/common';
|
||||
import { Readable } from 'stream';
|
||||
import { pipeline } from 'stream/promises'; // Using stream.pipeline for better error handling
|
||||
|
||||
import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
|
||||
import { BackupJobConfig } from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
|
||||
import {
|
||||
BackupDestinationProcessor,
|
||||
BackupDestinationProcessorOptions,
|
||||
BackupDestinationResult,
|
||||
StreamingDestinationHandle, // Assuming this will be defined in the interface file
|
||||
} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
|
||||
import { BackupDestinationService } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.service.js';
|
||||
import {
|
||||
BackupJobStatus,
|
||||
JobStatus,
|
||||
} from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
|
||||
import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
|
||||
import {
|
||||
BackupSourceProcessor,
|
||||
BackupSourceProcessorOptions,
|
||||
BackupSourceResult,
|
||||
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
|
||||
import { BackupSourceService } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.service.js';
|
||||
|
||||
@Injectable()
|
||||
export class BackupOrchestrationService {
|
||||
private readonly logger = new Logger(BackupOrchestrationService.name);
|
||||
|
||||
constructor(
|
||||
private readonly jobTrackingService: BackupJobTrackingService,
|
||||
private readonly backupSourceService: BackupSourceService,
|
||||
private readonly backupDestinationService: BackupDestinationService,
|
||||
@Inject(forwardRef(() => BackupConfigService))
|
||||
private readonly backupConfigService: BackupConfigService
|
||||
) {}
|
||||
|
||||
async executeBackupJob(jobConfig: BackupJobConfig, configId: string): Promise<string> {
|
||||
this.logger.log(
|
||||
`Starting orchestration for backup job: ${jobConfig.name} (Config ID: ${configId})`
|
||||
);
|
||||
|
||||
// Initialize job in tracking service and get the internal tracking object
|
||||
// configId (original jobConfig.id) is used to link tracking to config, jobConfig.name is for display
|
||||
const jobStatus = this.jobTrackingService.initializeJob(configId, jobConfig.name);
|
||||
const internalJobId = jobStatus.id; // This is the actual ID for this specific job run
|
||||
|
||||
// DO NOT call backupConfigService.updateBackupJobConfig here for currentJobId
|
||||
// This will be handled by BackupConfigService itself using the returned internalJobId
|
||||
|
||||
this.emitJobStatus(internalJobId, {
|
||||
status: BackupJobStatus.RUNNING,
|
||||
progress: 0,
|
||||
message: 'Job initializing...',
|
||||
});
|
||||
|
||||
const sourceProcessor = this.backupSourceService.getProcessor(jobConfig.sourceType);
|
||||
const destinationProcessor = this.backupDestinationService.getProcessor(
|
||||
jobConfig.destinationType
|
||||
);
|
||||
|
||||
if (!sourceProcessor || !destinationProcessor) {
|
||||
const errorMsg = 'Failed to initialize backup processors.';
|
||||
this.logger.error(`[Config ID: ${configId}, Job ID: ${internalJobId}] ${errorMsg}`);
|
||||
this.emitJobStatus(internalJobId, {
|
||||
status: BackupJobStatus.FAILED,
|
||||
error: errorMsg,
|
||||
});
|
||||
// Call handleJobCompletion before throwing
|
||||
await this.backupConfigService.handleJobCompletion(
|
||||
configId,
|
||||
BackupJobStatus.FAILED,
|
||||
internalJobId
|
||||
);
|
||||
throw new Error(errorMsg);
|
||||
}
|
||||
|
||||
try {
|
||||
if (sourceProcessor.supportsStreaming && destinationProcessor.supportsStreaming) {
|
||||
await this.executeStreamingBackup(
|
||||
sourceProcessor,
|
||||
destinationProcessor,
|
||||
jobConfig,
|
||||
internalJobId
|
||||
);
|
||||
} else {
|
||||
await this.executeRegularBackup(
|
||||
sourceProcessor,
|
||||
destinationProcessor,
|
||||
jobConfig,
|
||||
internalJobId,
|
||||
configId // Pass configId for handleJobCompletion
|
||||
);
|
||||
}
|
||||
// If executeStreamingBackup/executeRegularBackup complete without throwing, it implies success for those stages.
|
||||
// The final status (COMPLETED/FAILED) is set within those methods via emitJobStatus and then handleJobCompletion.
|
||||
} catch (error) {
|
||||
// Errors from executeStreamingBackup/executeRegularBackup should have already called handleJobCompletion.
|
||||
// This catch is a fallback.
|
||||
this.logger.error(
|
||||
`[Config ID: ${configId}, Job ID: ${internalJobId}] Orchestration error after backup execution attempt: ${(error as Error).message}`
|
||||
);
|
||||
// Ensure completion is handled if not already done by the execution methods
|
||||
// This might be redundant if execution methods are guaranteed to call it.
|
||||
// However, direct throws before or after calling those methods would be caught here.
|
||||
await this.backupConfigService.handleJobCompletion(
|
||||
configId,
|
||||
BackupJobStatus.FAILED,
|
||||
internalJobId
|
||||
);
|
||||
throw error; // Re-throw the error
|
||||
}
|
||||
// DO NOT clear currentJobId here using updateBackupJobConfig. It's handled by handleJobCompletion.
|
||||
|
||||
this.logger.log(
|
||||
`Finished orchestration logic for backup job: ${jobConfig.name} (Config ID: ${configId}, Job ID: ${internalJobId})`
|
||||
);
|
||||
return internalJobId; // Return the actual job ID for this run
|
||||
}
|
||||
|
||||
private async executeStreamingBackup(
|
||||
sourceProcessor: BackupSourceProcessor<any>,
|
||||
destinationProcessor: BackupDestinationProcessor<any>,
|
||||
jobConfig: BackupJobConfig, // This is the config object, not its ID
|
||||
internalJobId: string
|
||||
): Promise<void> {
|
||||
this.logger.log(
|
||||
`Executing STREAMING backup for job: ${jobConfig.name} (Internal Job ID: ${internalJobId})`
|
||||
);
|
||||
this.emitJobStatus(internalJobId, {
|
||||
status: BackupJobStatus.RUNNING,
|
||||
progress: 0,
|
||||
message: 'Starting streaming backup...',
|
||||
});
|
||||
|
||||
if (!sourceProcessor.getReadableStream || !destinationProcessor.getWritableStream) {
|
||||
const errorMsg =
|
||||
'Source or destination processor does not support streaming (missing getReadableStream or getWritableStream).';
|
||||
this.logger.error(`[${internalJobId}] ${errorMsg}`);
|
||||
this.emitJobStatus(internalJobId, { status: BackupJobStatus.FAILED, error: errorMsg });
|
||||
// Call handleJobCompletion before throwing
|
||||
await this.backupConfigService.handleJobCompletion(internalJobId, BackupJobStatus.FAILED);
|
||||
throw new Error(errorMsg);
|
||||
}
|
||||
|
||||
let sourceStream: Readable | null = null;
|
||||
let destinationStreamHandle: StreamingDestinationHandle | null = null;
|
||||
|
||||
const processorOptions: BackupDestinationProcessorOptions = {
|
||||
jobId: internalJobId,
|
||||
onProgress: (progress: number) => {
|
||||
this.logger.log(`[${internalJobId}] Destination progress: ${progress}%`);
|
||||
this.emitJobStatus(internalJobId, { progress: Math.min(progress, 99) });
|
||||
},
|
||||
onOutput: (data: string) => {
|
||||
this.logger.debug(`[${internalJobId} Dest. Processor Output]: ${data}`);
|
||||
},
|
||||
onError: (errorMsg: string) => {
|
||||
this.logger.warn(`[${internalJobId} Dest. Processor Error]: ${errorMsg}`);
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
this.logger.debug(`[${internalJobId}] Preparing source stream...`);
|
||||
sourceStream = await sourceProcessor.getReadableStream(jobConfig.sourceConfig);
|
||||
this.logger.debug(
|
||||
`[${internalJobId}] Source stream prepared. Preparing destination stream...`
|
||||
);
|
||||
destinationStreamHandle = await destinationProcessor.getWritableStream(
|
||||
jobConfig.destinationConfig,
|
||||
internalJobId,
|
||||
processorOptions
|
||||
);
|
||||
this.logger.debug(`[${internalJobId}] Destination stream prepared. Starting stream pipe.`);
|
||||
|
||||
if (!sourceStream || !destinationStreamHandle?.stream) {
|
||||
throw new Error('Failed to initialize source or destination stream.');
|
||||
}
|
||||
|
||||
let totalBytesProcessed = 0;
|
||||
sourceStream.on('data', (chunk) => {
|
||||
totalBytesProcessed += chunk.length;
|
||||
this.logger.verbose(
|
||||
`[${internalJobId}] Stream data: ${chunk.length} bytes, Total: ${totalBytesProcessed}`
|
||||
);
|
||||
});
|
||||
|
||||
await pipeline(sourceStream, destinationStreamHandle.stream);
|
||||
|
||||
this.logger.log(
|
||||
`[${internalJobId}] Stream piping completed. Waiting for destination processor to finalize...`
|
||||
);
|
||||
|
||||
const destinationResult = await destinationStreamHandle.completionPromise;
|
||||
            if (!destinationResult.success) {
                const errorMsg =
                    destinationResult.error || 'Destination processor failed after streaming.';
                this.logger.error(`[${internalJobId}] ${errorMsg}`);
                this.emitJobStatus(internalJobId, { status: BackupJobStatus.FAILED, error: errorMsg });
                // Call handleJobCompletion before throwing
                await this.backupConfigService.handleJobCompletion(
                    jobConfig.id,
                    BackupJobStatus.FAILED,
                    internalJobId
                );
                throw new Error(errorMsg);
            }

            this.logger.log(
                `Streaming backup job ${jobConfig.name} (Internal ID: ${internalJobId}) completed successfully.`
            );
            this.emitJobStatus(internalJobId, {
                status: BackupJobStatus.COMPLETED,
                progress: 100,
                message: 'Backup completed successfully.',
            });
            // Call handleJobCompletion on success
            await this.backupConfigService.handleJobCompletion(
                jobConfig.id,
                BackupJobStatus.COMPLETED,
                internalJobId
            );

            if (sourceProcessor.cleanup) {
                this.logger.debug(`[${internalJobId}] Performing post-success cleanup for source...`);
                await sourceProcessor.cleanup({
                    success: true,
                    outputPath: 'streamed',
                    cleanupRequired: false,
                } as any);
            }
            if (destinationProcessor.cleanup) {
                this.logger.debug(
                    `[${internalJobId}] Performing post-success cleanup for destination...`
                );
                await destinationProcessor.cleanup({ success: true, cleanupRequired: false });
            }
        } catch (e) {
            const error = e as Error;
            this.logger.error(
                `Streaming backup job ${jobConfig.name} (Internal ID: ${internalJobId}) failed: ${error.message}`,
                error.stack
            );

            this.emitJobStatus(internalJobId, {
                status: BackupJobStatus.FAILED,
                error: error.message,
                message: 'Backup failed during streaming execution.',
            });
            // Call handleJobCompletion on failure
            await this.backupConfigService.handleJobCompletion(
                jobConfig.id,
                BackupJobStatus.FAILED,
                internalJobId
            );

            this.logger.error(
                `[${internalJobId}] Performing cleanup due to failure for job ${jobConfig.name}...`
            );
            try {
                if (sourceProcessor.cleanup) {
                    this.logger.debug(`[${internalJobId}] Cleaning up source processor...`);
                    await sourceProcessor.cleanup({
                        success: false,
                        error: error.message,
                        cleanupRequired: true,
                    } as any);
                }
            } catch (cleanupError) {
                this.logger.error(
                    `[${internalJobId}] Error during source processor cleanup: ${(cleanupError as Error).message}`,
                    (cleanupError as Error).stack
                );
            }

            try {
                if (destinationProcessor.cleanup) {
                    this.logger.debug(`[${internalJobId}] Cleaning up destination processor...`);
                    const destCleanupError =
                        (
                            destinationStreamHandle?.completionPromise &&
                            ((await destinationStreamHandle.completionPromise.catch(
                                (er) => er
                            )) as BackupDestinationResult)
                        )?.error || error.message;
                    await destinationProcessor.cleanup({
                        success: false,
                        error: destCleanupError,
                        cleanupRequired: true,
                    });
                }
            } catch (cleanupError) {
                this.logger.error(
                    `[${internalJobId}] Error during destination processor cleanup: ${(cleanupError as Error).message}`,
                    (cleanupError as Error).stack
                );
            }

            throw error;
        }
    }
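
    // Unlike the streaming path above, which pipes source output directly into the
    // destination, the regular path below materializes the source to an outputPath
    // first and only then hands that path to the destination processor, giving two
    // sequential execute() phases:
    //   sourceResult = await sourceProcessor.execute(jobConfig.sourceConfig, processorOptions);
    //   destinationResult = await destinationProcessor.execute(sourceResult.outputPath, jobConfig.destinationConfig, processorOptions);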
    private async executeRegularBackup(
        sourceProcessor: BackupSourceProcessor<any>,
        destinationProcessor: BackupDestinationProcessor<any>,
        jobConfig: BackupJobConfig, // This is the config object, not its ID
        internalJobId: string,
        configId: string // Pass the configId for handleJobCompletion
    ): Promise<void> {
        this.logger.log(
            `Executing REGULAR backup for job: ${jobConfig.name} (Config ID: ${configId}, Internal Job ID: ${internalJobId})`
        );
        this.emitJobStatus(internalJobId, {
            status: BackupJobStatus.RUNNING,
            progress: 0,
            message: 'Starting regular backup...',
        });

        let sourceResult: BackupSourceResult | null = null;
        let destinationResult: BackupDestinationResult | null = null;

        const processorOptions: BackupSourceProcessorOptions & BackupDestinationProcessorOptions = {
            jobId: internalJobId,
            onProgress: (progressUpdate) => {
                const numericProgress =
                    typeof progressUpdate === 'number'
                        ? progressUpdate
                        : (progressUpdate as any).progress;
                if (typeof numericProgress === 'number') {
                    this.emitJobStatus(internalJobId, { progress: numericProgress });
                }
            },
            onOutput: (data: string) => {
                this.logger.debug(`[${internalJobId} Processor Output]: ${data}`);
            },
            onError: (errorMsg: string) => {
                this.logger.warn(`[${internalJobId} Processor Error]: ${errorMsg}`);
            },
        };

        try {
            this.logger.debug(`[${internalJobId}] Executing source processor...`);
            sourceResult = await sourceProcessor.execute(jobConfig.sourceConfig, processorOptions);
            this.logger.debug(
                `[${internalJobId}] Source processor execution completed. Success: ${sourceResult.success}, OutputPath: ${sourceResult.outputPath}`
            );

            if (!sourceResult.success || !sourceResult.outputPath) {
                const errorMsg =
                    sourceResult.error || 'Source processor failed to produce an output path.';
                this.logger.error(`[${internalJobId}] Source processor failed: ${errorMsg}`);
                this.emitJobStatus(internalJobId, {
                    status: BackupJobStatus.FAILED,
                    error: errorMsg,
                    message: 'Source processing failed.',
                });
                this.jobTrackingService.updateJobStatus(internalJobId, {
                    status: BackupJobStatus.FAILED,
                    error: errorMsg,
                });
                // Call handleJobCompletion before throwing
                await this.backupConfigService.handleJobCompletion(
                    configId,
                    BackupJobStatus.FAILED,
                    internalJobId
                );
                throw new Error(errorMsg);
            }
            this.emitJobStatus(internalJobId, {
                progress: 50,
                message: 'Source processing complete. Starting destination processing.',
            });

            this.logger.debug(
                `[${internalJobId}] Executing destination processor with source output: ${sourceResult.outputPath}...`
            );
            destinationResult = await destinationProcessor.execute(
                sourceResult.outputPath,
                jobConfig.destinationConfig,
                processorOptions
            );
            this.logger.debug(
                `[${internalJobId}] Destination processor execution completed. Success: ${destinationResult.success}`
            );

            if (!destinationResult.success) {
                const errorMsg = destinationResult.error || 'Destination processor failed.';
                this.logger.error(`[${internalJobId}] Destination processor failed: ${errorMsg}`);
                this.emitJobStatus(internalJobId, {
                    status: BackupJobStatus.FAILED,
                    error: errorMsg,
                    message: 'Destination processing failed.',
                });
                this.jobTrackingService.updateJobStatus(internalJobId, {
                    status: BackupJobStatus.FAILED,
                    error: errorMsg,
                });
                // Call handleJobCompletion before throwing
                await this.backupConfigService.handleJobCompletion(
                    configId,
                    BackupJobStatus.FAILED,
                    internalJobId
                );
                throw new Error(errorMsg);
            }

            this.logger.log(
                `Regular backup job ${jobConfig.name} (Internal ID: ${internalJobId}) completed successfully.`
            );
            this.emitJobStatus(internalJobId, {
                status: BackupJobStatus.COMPLETED,
                progress: 100,
                message: 'Backup completed successfully.',
            });
            // Call handleJobCompletion on success
            await this.backupConfigService.handleJobCompletion(
                configId,
                BackupJobStatus.COMPLETED,
                internalJobId
            );

            if (sourceResult && sourceProcessor.cleanup) {
                this.logger.debug(
                    `[${internalJobId}] Performing post-success cleanup for source processor...`
                );
                await sourceProcessor.cleanup(sourceResult);
            }
            if (destinationResult && destinationProcessor.cleanup) {
                this.logger.debug(
                    `[${internalJobId}] Performing post-success cleanup for destination processor...`
                );
                await destinationProcessor.cleanup(destinationResult);
            }
        } catch (e) {
            const error = e as Error;
            this.logger.error(
                `Regular backup job ${jobConfig.name} (Internal ID: ${internalJobId}) failed: ${error.message}`,
                error.stack
            );

            this.emitJobStatus(internalJobId, {
                status: BackupJobStatus.FAILED,
                error: error.message,
                message: 'Backup failed during regular execution.',
            });
            this.jobTrackingService.updateJobStatus(internalJobId, {
                status: BackupJobStatus.FAILED,
                error: error.message,
            });
            // Call handleJobCompletion on failure
            await this.backupConfigService.handleJobCompletion(
                configId,
                BackupJobStatus.FAILED,
                internalJobId
            );

            this.logger.error(
                `[${internalJobId}] Performing cleanup due to failure for job ${jobConfig.name}...`
            );
            if (sourceResult && sourceProcessor.cleanup) {
                try {
                    this.logger.debug(
                        `[${internalJobId}] Cleaning up source processor after failure...`
                    );
                    await sourceProcessor.cleanup({
                        ...sourceResult,
                        success: false,
                        error: sourceResult.error || error.message,
                    });
                } catch (cleanupError) {
                    this.logger.error(
                        `[${internalJobId}] Error during source processor cleanup: ${(cleanupError as Error).message}`,
                        (cleanupError as Error).stack
                    );
                }
            }

            if (destinationResult && destinationProcessor.cleanup) {
                try {
                    this.logger.debug(
                        `[${internalJobId}] Cleaning up destination processor after failure...`
                    );
                    await destinationProcessor.cleanup({
                        ...destinationResult,
                        success: false,
                        error: destinationResult.error || error.message,
                    });
                } catch (cleanupError) {
                    this.logger.error(
                        `[${internalJobId}] Error during destination processor cleanup: ${(cleanupError as Error).message}`,
                        (cleanupError as Error).stack
                    );
                }
            } else if (sourceResult?.success && destinationProcessor.cleanup) {
                try {
                    this.logger.debug(
                        `[${internalJobId}] Cleaning up destination processor after a failure (destinationResult not available)...`
                    );
                    await destinationProcessor.cleanup({
                        success: false,
                        error: error.message,
                        cleanupRequired: true,
                    });
                } catch (cleanupError) {
                    this.logger.error(
                        `[${internalJobId}] Error during destination processor cleanup (no result): ${(cleanupError as Error).message}`,
                        (cleanupError as Error).stack
                    );
                }
            }
            throw error;
        }
    }

    private emitJobStatus(
        internalJobId: string,
        statusUpdate: {
            status?: BackupJobStatus;
            progress?: number;
            message?: string;
            error?: string;
        }
    ): void {
        this.logger.log(
            `[Job Status Update - ${internalJobId}]: Status: ${statusUpdate.status}, Progress: ${statusUpdate.progress}, Msg: ${statusUpdate.message}, Err: ${statusUpdate.error}`
        );

        const updatePayload: Partial<Omit<JobStatus, 'externalJobId' | 'startTime' | 'name' | 'id'>> = {
            ...statusUpdate,
        };
        this.jobTrackingService.updateJobStatus(internalJobId, updatePayload);
    }
}
@@ -0,0 +1,503 @@
import type { LabelElement, SchemaBasedCondition } from '@jsonforms/core';
import { JsonSchema7, RuleEffect } from '@jsonforms/core';

import type { DataSlice, SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js';

export function getSourceConfigSlice(): SettingSlice {
    const sourceConfigElements: UIElement[] = [
        {
            type: 'Control',
            scope: '#/properties/sourceConfig/properties/type',
            options: {
                format: 'radio',
                radioLayout: 'horizontal',
                options: [
                    {
                        label: 'ZFS Snapshot',
                        value: SourceType.ZFS,
                        description: 'Create ZFS snapshot and backup',
                    },
                    {
                        label: 'Flash Drive',
                        value: SourceType.FLASH,
                        description: 'Backup flash drive contents',
                    },
                    {
                        label: 'Custom Script',
                        value: SourceType.SCRIPT,
                        description: 'Run custom script to generate backup data',
                    },
                    {
                        label: 'Raw Files',
                        value: SourceType.RAW,
                        description: 'Direct file backup without preprocessing',
                    },
                ],
            },
        },

        createLabeledControl({
            scope: '#/properties/sourceConfig/properties/timeout',
            label: 'Timeout',
            description: 'Timeout in seconds for backup operation',
            controlOptions: {
                placeholder: '3600',
                format: 'number',
            },
        }),

        createLabeledControl({
            scope: '#/properties/sourceConfig/properties/cleanupOnFailure',
            label: 'Cleanup on Failure',
            description: 'Clean up backup artifacts on failure',
            controlOptions: {
                format: 'toggle',
            },
        }),

        // Raw Backup Configuration
        {
            type: 'VerticalLayout',
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/sourceConfig/properties/type',
                    schema: { const: SourceType.RAW },
                } as SchemaBasedCondition,
            },
            elements: [
                {
                    type: 'Label',
                    text: 'Raw Backup Configuration',
                    options: {
                        description: 'Configure direct file/folder backup settings.',
                    },
                } as LabelElement,

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/rawConfig/properties/sourcePath',
                    label: 'Source Path',
                    description: 'Source path to backup',
                    controlOptions: {
                        placeholder: '/mnt/user/data',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/rawConfig/properties/excludePatterns',
                    label: 'Exclude Patterns',
                    description: 'Patterns to exclude from backup',
                    controlOptions: {
                        placeholder: '*.tmp,*.log',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/rawConfig/properties/includePatterns',
                    label: 'Include Patterns',
                    description: 'Patterns to include in backup',
                    controlOptions: {
                        placeholder: '*.txt,*.doc',
                    },
                }),
            ],
        },

        // ZFS Configuration
        {
            type: 'VerticalLayout',
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/sourceConfig/properties/type',
                    schema: { const: SourceType.ZFS },
                } as SchemaBasedCondition,
            },
            elements: [
                {
                    type: 'Label',
                    text: 'ZFS Configuration',
                    options: {
                        description: 'Configure ZFS snapshot settings for backup.',
                    },
                } as LabelElement,

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/zfsConfig/properties/poolName',
                    label: 'Pool Name',
                    description: 'ZFS pool name',
                    controlOptions: {
                        placeholder: 'tank',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/zfsConfig/properties/datasetName',
                    label: 'Dataset Name',
                    description: 'ZFS dataset name',
                    controlOptions: {
                        placeholder: 'data',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/zfsConfig/properties/snapshotPrefix',
                    label: 'Snapshot Prefix',
                    description: 'Prefix for snapshot names',
                    controlOptions: {
                        placeholder: 'backup',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/zfsConfig/properties/cleanupSnapshots',
                    label: 'Cleanup Snapshots',
                    description: 'Clean up snapshots after backup',
                    controlOptions: {
                        format: 'checkbox',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/zfsConfig/properties/retainSnapshots',
                    label: 'Retain Snapshots',
                    description: 'Number of snapshots to retain',
                    controlOptions: {
                        placeholder: '5',
                        format: 'number',
                    },
                }),
            ],
        },

        // Flash Configuration
        {
            type: 'VerticalLayout',
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/sourceConfig/properties/type',
                    schema: { const: SourceType.FLASH },
                } as SchemaBasedCondition,
            },
            elements: [
                {
                    type: 'Label',
                    text: 'Flash Backup Configuration',
                    options: {
                        description: 'Configure Unraid flash drive backup settings.',
                    },
                } as LabelElement,

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/flashConfig/properties/flashPath',
                    label: 'Flash Path',
                    description: 'Path to flash drive',
                    controlOptions: {
                        placeholder: '/boot',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/flashConfig/properties/includeGitHistory',
                    label: 'Include Git History',
                    description: 'Include git history in backup',
                    controlOptions: {
                        format: 'checkbox',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/flashConfig/properties/additionalPaths',
                    label: 'Additional Paths',
                    description: 'Additional paths to include',
                    controlOptions: {
                        placeholder: '/etc/config',
                    },
                }),
            ],
        },

        // Script Configuration
        {
            type: 'VerticalLayout',
            rule: {
                effect: RuleEffect.SHOW,
                condition: {
                    scope: '#/properties/sourceConfig/properties/type',
                    schema: { const: SourceType.SCRIPT },
                } as SchemaBasedCondition,
            },
            elements: [
                {
                    type: 'Label',
                    text: 'Custom Script Configuration',
                    options: {
                        description: 'Configure custom script execution settings.',
                    },
                } as LabelElement,

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/scriptPath',
                    label: 'Script Path',
                    description: 'Path to script file',
                    controlOptions: {
                        placeholder: '/usr/local/bin/backup.sh',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/scriptArgs',
                    label: 'Script Arguments',
                    description: 'Arguments for script',
                    controlOptions: {
                        placeholder: '--verbose --compress',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/workingDirectory',
                    label: 'Working Directory',
                    description: 'Working directory for script',
                    controlOptions: {
                        placeholder: '/tmp',
                    },
                }),

                createLabeledControl({
                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/outputPath',
                    label: 'Output Path',
                    description: 'Path for script output',
                    controlOptions: {
                        placeholder: '/tmp/backup.tar.gz',
                    },
                }),
            ],
        },
    ];

    const sourceConfigProperties: Record<string, JsonSchema7> = {
        sourceConfig: {
            type: 'object',
            title: 'Source Configuration',
            description: 'Configuration for backup source',
            properties: {
                type: {
                    type: 'string',
                    title: 'Backup Type',
                    description: 'Type of backup to perform',
                    enum: [SourceType.ZFS, SourceType.FLASH, SourceType.SCRIPT, SourceType.RAW],
                    default: SourceType.ZFS,
                },
                timeout: {
                    type: 'integer',
                    title: 'Timeout',
                    description: 'Timeout in seconds for backup operation',
                    minimum: 30,
                    maximum: 86400,
                    default: 3600,
                },
                cleanupOnFailure: {
                    type: 'boolean',
                    title: 'Cleanup on Failure',
                    description: 'Clean up backup artifacts on failure',
                    default: true,
                },
                rawConfig: {
                    type: 'object',
                    title: 'Raw Backup Configuration',
                    properties: {
                        sourcePath: {
                            type: 'string',
                            title: 'Source Path',
                            description: 'Source path to backup',
                            minLength: 1,
                        },
                        excludePatterns: {
                            type: 'array',
                            title: 'Exclude Patterns',
                            description: 'Patterns to exclude from backup',
                            items: {
                                type: 'string',
                            },
                            default: [],
                        },
                        includePatterns: {
                            type: 'array',
                            title: 'Include Patterns',
                            description: 'Patterns to include in backup',
                            items: {
                                type: 'string',
                            },
                            default: [],
                        },
                    },
                    required: ['sourcePath'],
                },
                zfsConfig: {
                    type: 'object',
                    title: 'ZFS Configuration',
                    properties: {
                        poolName: {
                            type: 'string',
                            title: 'Pool Name',
                            description: 'ZFS pool name',
                            minLength: 1,
                        },
                        datasetName: {
                            type: 'string',
                            title: 'Dataset Name',
                            description: 'ZFS dataset name',
                            minLength: 1,
                        },
                        snapshotPrefix: {
                            type: 'string',
                            title: 'Snapshot Prefix',
                            description: 'Prefix for snapshot names',
                            default: 'backup',
                        },
                        cleanupSnapshots: {
                            type: 'boolean',
                            title: 'Cleanup Snapshots',
                            description: 'Clean up snapshots after backup',
                            default: true,
                        },
                        retainSnapshots: {
                            type: 'integer',
                            title: 'Retain Snapshots',
                            description: 'Number of snapshots to retain',
                            minimum: 0,
                            default: 5,
                        },
                    },
                    required: ['poolName', 'datasetName'],
                },
                flashConfig: {
                    type: 'object',
                    title: 'Flash Configuration',
                    properties: {
                        flashPath: {
                            type: 'string',
                            title: 'Flash Path',
                            description: 'Path to flash drive',
                            default: '/boot',
                        },
                        includeGitHistory: {
                            type: 'boolean',
                            title: 'Include Git History',
                            description: 'Include git history in backup',
                            default: true,
                        },
                        additionalPaths: {
                            type: 'array',
                            title: 'Additional Paths',
                            description: 'Additional paths to include',
                            items: {
                                type: 'string',
                            },
                            default: [],
                        },
                    },
                },
                scriptConfig: {
                    type: 'object',
                    title: 'Script Configuration',
                    properties: {
                        scriptPath: {
                            type: 'string',
                            title: 'Script Path',
                            description: 'Path to script file',
                            minLength: 1,
                        },
                        scriptArgs: {
                            type: 'array',
                            title: 'Script Arguments',
                            description: 'Arguments for script',
                            items: {
                                type: 'string',
                            },
                            default: [],
                        },
                        workingDirectory: {
                            type: 'string',
                            title: 'Working Directory',
                            description: 'Working directory for script',
                            default: '/tmp',
                        },
                        outputPath: {
                            type: 'string',
                            title: 'Output Path',
                            description: 'Path for script output',
                            minLength: 1,
                        },
                    },
                    required: ['scriptPath', 'outputPath'],
                },
            },
            required: ['type'],
        },
    };

    // Apply conditional logic for sourceConfig
    if (sourceConfigProperties.sourceConfig && typeof sourceConfigProperties.sourceConfig === 'object') {
        sourceConfigProperties.sourceConfig.allOf = [
            {
                if: { properties: { type: { const: SourceType.RAW } }, required: ['type'] },
                then: {
                    required: ['rawConfig'],
                    properties: {
                        zfsConfig: { not: {} },
                        flashConfig: { not: {} },
                        scriptConfig: { not: {} },
                    },
                },
            },
            {
                if: { properties: { type: { const: SourceType.ZFS } }, required: ['type'] },
                then: {
                    required: ['zfsConfig'],
                    properties: {
                        rawConfig: { not: {} },
                        flashConfig: { not: {} },
                        scriptConfig: { not: {} },
                    },
                },
            },
            {
                if: { properties: { type: { const: SourceType.FLASH } }, required: ['type'] },
                then: {
                    required: ['flashConfig'],
                    properties: {
                        rawConfig: { not: {} },
                        zfsConfig: { not: {} },
                        scriptConfig: { not: {} },
                    },
                },
            },
            {
                if: { properties: { type: { const: SourceType.SCRIPT } }, required: ['type'] },
                then: {
                    required: ['scriptConfig'],
                    properties: {
                        rawConfig: { not: {} },
                        zfsConfig: { not: {} },
                        flashConfig: { not: {} },
                    },
                },
            },
        ];
    }

    const verticalLayoutElement: UIElement = {
        type: 'VerticalLayout',
        elements: sourceConfigElements,
        options: { step: 1 },
    };

    return {
        properties: sourceConfigProperties,
        elements: [verticalLayoutElement],
    };
}
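
// Illustrative only: under the allOf rules above, a payload that selects ZFS must carry
// zfsConfig and may not carry any of the other per-type blocks (field values hypothetical):
//   { type: 'ZFS', timeout: 3600, cleanupOnFailure: true,
//     zfsConfig: { poolName: 'tank', datasetName: 'data' } }
// Adding flashConfig alongside type: 'ZFS' fails validation because of the `not: {}` guards.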
@@ -0,0 +1,53 @@
import { Readable } from 'stream';

import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';

export interface BackupSourceConfig {
    timeout: number;
    cleanupOnFailure: boolean;
}

export interface BackupSourceResult {
    success: boolean;
    outputPath?: string;
    streamPath?: string;
    snapshotName?: string;
    error?: string;
    cleanupRequired?: boolean;
    metadata?: Record<string, unknown>;

    // Streaming support
    streamCommand?: string;
    streamArgs?: string[];
    supportsStreaming?: boolean;
    isStreamingMode?: boolean;
}

export interface BackupSourceProcessorOptions {
    jobId?: string;
    onProgress?: (progress: number) => void;
    onOutput?: (data: string) => void;
    onError?: (error: string) => void;
    useStreaming?: boolean;
}

export abstract class BackupSourceProcessor<TConfig extends BackupSourceConfig> {
    abstract readonly sourceType: SourceType;

    abstract execute(
        config: TConfig,
        options?: BackupSourceProcessorOptions
    ): Promise<BackupSourceResult>;

    abstract validate(config: TConfig): Promise<{ valid: boolean; error?: string; warnings?: string[] }>;

    abstract cleanup(result: BackupSourceResult): Promise<void>;

    // Getter to check if processor supports streaming
    abstract get supportsStreaming(): boolean;

    // Optional getter to get a readable stream for streaming backups
    get getReadableStream(): ((config: TConfig) => Promise<Readable>) | undefined {
        return undefined;
    }
}
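
// Illustrative only (not part of this changeset): a minimal concrete processor
// showing how the abstract contract above is typically satisfied. The class name,
// its no-op behavior, and the placeholder output path are hypothetical.
export class NoopSourceProcessor extends BackupSourceProcessor<BackupSourceConfig> {
    readonly sourceType = SourceType.RAW;

    get supportsStreaming(): boolean {
        return false;
    }

    async execute(config: BackupSourceConfig): Promise<BackupSourceResult> {
        // Nothing to prepare; report success and point at a placeholder artifact.
        return { success: true, outputPath: '/tmp/noop-backup', cleanupRequired: false };
    }

    async validate(
        config: BackupSourceConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        return { valid: true };
    }

    async cleanup(result: BackupSourceResult): Promise<void> {
        // No artifacts were created, so there is nothing to remove.
    }
}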
@@ -0,0 +1,30 @@
import { Module } from '@nestjs/common';

import { BackupSourceService } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.service.js';
import { FlashSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.js';
import { FlashValidationService } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.js';
import { RawSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.js';
import { ScriptSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.js';
import { ZfsSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.js';
import { ZfsValidationService } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.js';

@Module({
    providers: [
        BackupSourceService,
        FlashSourceProcessor,
        FlashValidationService,
        RawSourceProcessor,
        ScriptSourceProcessor,
        ZfsSourceProcessor,
        ZfsValidationService,
    ],
    exports: [
        BackupSourceService,
        FlashSourceProcessor,
        RawSourceProcessor,
        ScriptSourceProcessor,
        ZfsSourceProcessor,
        ZfsValidationService,
    ],
})
export class BackupSourceModule {}
@@ -0,0 +1,99 @@
import { BadRequestException, Injectable, Logger } from '@nestjs/common';
import { EventEmitter } from 'events';

import { v4 as uuidv4 } from 'uuid';

import {
    BackupSourceConfig,
    BackupSourceProcessor,
    BackupSourceProcessorOptions,
    BackupSourceResult,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import {
    FlashSourceConfig,
    FlashSourceProcessor,
} from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.js';
import {
    RawSourceConfig,
    RawSourceProcessor,
} from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.js';
import {
    ScriptSourceConfig,
    ScriptSourceProcessor,
} from '@app/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.js';
import {
    ZfsSourceConfig,
    ZfsSourceProcessor,
} from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.js';

export interface BackupSourceOptions {
    jobId?: string;
    onProgress?: (progress: number) => void;
    onOutput?: (data: string) => void;
    onError?: (error: string) => void;
}

@Injectable()
export class BackupSourceService extends EventEmitter {
    private readonly logger = new Logger(BackupSourceService.name);

    constructor(
        private readonly flashSourceProcessor: FlashSourceProcessor,
        private readonly rawSourceProcessor: RawSourceProcessor,
        private readonly scriptSourceProcessor: ScriptSourceProcessor,
        private readonly zfsSourceProcessor: ZfsSourceProcessor
    ) {
        super();
    }

    async processSource<T extends BackupSourceConfig & { type: SourceType }>(
        config: T,
        options?: BackupSourceOptions
    ): Promise<BackupSourceResult> {
        const processor = this.getProcessor(config.type);
        if (!processor) {
            throw new BadRequestException(`Unsupported source type: ${config.type}`);
        }

        const processorOptions: BackupSourceProcessorOptions = {
            jobId: options?.jobId || uuidv4(),
            onProgress: options?.onProgress,
            onOutput: options?.onOutput,
            onError: options?.onError,
        };

        try {
            const result = await processor.execute(config, processorOptions);
            this.logger.log(`Source processing completed for type: ${config.type}`);
            return result;
        } catch (error) {
            this.logger.error(`Source processing failed for type: ${config.type}`, error);
            throw error;
        }
    }

    async cancelSourceJob(jobId: string): Promise<boolean> {
        this.logger.log(`Attempting to cancel source job: ${jobId}`);
        return false;
    }

    async cleanup(): Promise<void> {
        this.logger.log('Cleaning up source service...');
    }

    public getProcessor(type: SourceType): BackupSourceProcessor<any> | null {
        switch (type) {
            case SourceType.FLASH:
                return this.flashSourceProcessor;
            case SourceType.RAW:
                return this.rawSourceProcessor;
            case SourceType.SCRIPT:
                return this.scriptSourceProcessor;
            case SourceType.ZFS:
                return this.zfsSourceProcessor;
            default:
                return null;
        }
    }
}
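
// Illustrative only: a typical call into the service above (config literal hypothetical).
//   const result = await backupSourceService.processSource(
//       { type: SourceType.FLASH, timeout: 3600, cleanupOnFailure: true,
//         flashPath: '/boot', includeGitHistory: true },
//       { onProgress: (p) => logger.log(`source progress: ${p}%`) }
//   );
//   // result.outputPath (or result.streamCommand/streamArgs) then feeds the destination stage.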
@@ -0,0 +1,136 @@
import { createUnionType, Field, InputType, ObjectType, registerEnumType } from '@nestjs/graphql';

import { Type } from 'class-transformer';
import { IsBoolean, IsEnum, IsNumber, IsOptional, Min, ValidateNested } from 'class-validator';

import {
    FlashPreprocessConfig,
    FlashPreprocessConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';
import {
    RawBackupConfig,
    RawBackupConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.js';
import {
    ScriptPreprocessConfig,
    ScriptPreprocessConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/script/script-source.types.js';
import {
    ZfsPreprocessConfig,
    ZfsPreprocessConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js';

export enum SourceType {
    ZFS = 'ZFS',
    FLASH = 'FLASH',
    SCRIPT = 'SCRIPT',
    RAW = 'RAW',
}

registerEnumType(SourceType, {
    name: 'SourceType',
    description:
        'Type of backup to perform (ZFS snapshot, Flash backup, Custom script, or Raw file backup)',
});

export { ZfsPreprocessConfigInput, ZfsPreprocessConfig };
export { FlashPreprocessConfigInput, FlashPreprocessConfig };
export { ScriptPreprocessConfigInput, ScriptPreprocessConfig };
export { RawBackupConfigInput, RawBackupConfig };

@InputType()
export class SourceConfigInput {
    @Field(() => SourceType, { nullable: false })
    @IsEnum(SourceType, { message: 'Invalid source type' })
    type!: SourceType;

    @Field(() => Number, { description: 'Timeout for backup operation in seconds', defaultValue: 3600 })
    @IsOptional()
    @IsNumber()
    @Min(1)
    timeout?: number;

    @Field(() => Boolean, { description: 'Whether to cleanup on failure', defaultValue: true })
    @IsOptional()
    @IsBoolean()
    cleanupOnFailure?: boolean;

    @Field(() => ZfsPreprocessConfigInput, { nullable: true })
    @IsOptional()
    @ValidateNested()
    @Type(() => ZfsPreprocessConfigInput)
    zfsConfig?: ZfsPreprocessConfigInput;

    @Field(() => FlashPreprocessConfigInput, { nullable: true })
    @IsOptional()
    @ValidateNested()
    @Type(() => FlashPreprocessConfigInput)
    flashConfig?: FlashPreprocessConfigInput;

    @Field(() => ScriptPreprocessConfigInput, { nullable: true })
    @IsOptional()
    @ValidateNested()
    @Type(() => ScriptPreprocessConfigInput)
    scriptConfig?: ScriptPreprocessConfigInput;

    @Field(() => RawBackupConfigInput, { nullable: true })
    @IsOptional()
    @ValidateNested()
    @Type(() => RawBackupConfigInput)
    rawConfig?: RawBackupConfigInput;
}

@ObjectType()
export class SourceConfig {
    @Field(() => Number)
    timeout!: number;

    @Field(() => Boolean)
    cleanupOnFailure!: boolean;

    @Field(() => ZfsPreprocessConfig, { nullable: true })
    zfsConfig?: ZfsPreprocessConfig;

    @Field(() => FlashPreprocessConfig, { nullable: true })
    flashConfig?: FlashPreprocessConfig;

    @Field(() => ScriptPreprocessConfig, { nullable: true })
    scriptConfig?: ScriptPreprocessConfig;

    @Field(() => RawBackupConfig, { nullable: true })
    rawConfig?: RawBackupConfig;
}

export const SourceConfigUnion = createUnionType({
    name: 'SourceConfigUnion',
    types: () =>
        [ZfsPreprocessConfig, FlashPreprocessConfig, ScriptPreprocessConfig, RawBackupConfig] as const,
    resolveType(obj: any, context, info) {
        if (ZfsPreprocessConfig.isTypeOf && ZfsPreprocessConfig.isTypeOf(obj)) {
            return ZfsPreprocessConfig;
        }
        if (FlashPreprocessConfig.isTypeOf && FlashPreprocessConfig.isTypeOf(obj)) {
            return FlashPreprocessConfig;
        }
        if (ScriptPreprocessConfig.isTypeOf && ScriptPreprocessConfig.isTypeOf(obj)) {
            return ScriptPreprocessConfig;
        }
        if (RawBackupConfig.isTypeOf && RawBackupConfig.isTypeOf(obj)) {
            return RawBackupConfig;
        }
        console.error(`[SourceConfigUnion] Could not resolve type for object: ${JSON.stringify(obj)}`);
        return null;
    },
});

export const SourceConfigInputUnion = SourceConfigInput;

export interface PreprocessResult {
    success: boolean;
    streamPath?: string;
    outputPath?: string;
    snapshotName?: string;
    error?: string;
    cleanupRequired?: boolean;
    metadata?: Record<string, unknown>;
}
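
// Note: SourceConfigUnion above resolves GraphQL types structurally through each
// class's static isTypeOf guard, so plain objects loaded from persistence (not
// class instances) still resolve. Illustrative only (object shape hypothetical):
//   resolveType({ flashPath: '/boot', includeGitHistory: true })  ->  FlashPreprocessConfig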
@@ -0,0 +1,18 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { IsOptional, IsString } from 'class-validator';

@InputType()
export abstract class BaseSourceConfigInput {
    @Field(() => String, {
        description: 'Human-readable label for this source configuration',
        nullable: true,
    })
    @IsOptional()
    @IsString()
    label?: string;
}

export interface BaseSourceConfig {
    label: string;
}
@@ -0,0 +1,307 @@
import { Injectable, Logger } from '@nestjs/common';
import { writeFile } from 'fs/promises';
import { join } from 'path';
import { Readable } from 'stream';

import { execa } from 'execa';

import {
    BackupSourceConfig,
    BackupSourceProcessor,
    BackupSourceProcessorOptions,
    BackupSourceResult,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { FlashPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';
import { FlashValidationService } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.js';

export interface FlashSourceConfig extends BackupSourceConfig {
    flashPath: string;
    includeGitHistory: boolean;
    additionalPaths?: string[];
}

@Injectable()
export class FlashSourceProcessor extends BackupSourceProcessor<FlashSourceConfig> {
    readonly sourceType = SourceType.FLASH;
    private readonly logger = new Logger(FlashSourceProcessor.name);

    constructor(private readonly flashValidationService: FlashValidationService) {
        super();
    }

    async execute(
        config: FlashSourceConfig,
        options?: BackupSourceProcessorOptions
    ): Promise<BackupSourceResult> {
        const validation = await this.validate(config);
        if (!validation.valid) {
            return {
                success: false,
                error: `Flash configuration validation failed: ${validation.error}`,
                metadata: { validationError: validation.error, validationWarnings: validation.warnings },
            };
        }

        if (validation.warnings?.length) {
            this.logger.warn(`Flash backup warnings: ${validation.warnings.join(', ')}`);
        }

        const tempGitPath = join(config.flashPath, '.git-backup-temp');
        let gitRepoInitialized = false;

        try {
            if (config.includeGitHistory) {
                gitRepoInitialized = await this.initializeGitRepository(config.flashPath, tempGitPath);
                if (gitRepoInitialized) {
                    this.logger.log(`Initialized git repository for Flash backup at: ${tempGitPath}`);
                }
            }

            // Generate streaming command for tar compression
            const streamCommand = this.generateStreamCommand(config, gitRepoInitialized, tempGitPath);

            return {
                success: true,
                outputPath: config.flashPath,
                streamPath: config.flashPath,
                metadata: {
                    flashPath: config.flashPath,
                    gitHistoryIncluded: config.includeGitHistory && gitRepoInitialized,
                    additionalPaths: config.additionalPaths,
                    validationWarnings: validation.warnings,
                    tempGitPath: gitRepoInitialized ? tempGitPath : undefined,
                    streamCommand: streamCommand.command,
                    streamArgs: streamCommand.args,
                    sourceType: this.sourceType,
                },
                cleanupRequired: gitRepoInitialized,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Flash backup failed: ${errorMessage}`, error);

            if (gitRepoInitialized) {
                try {
                    await this.cleanupTempGitRepo(tempGitPath);
                    this.logger.log(`Cleaned up temporary git repository after failure: ${tempGitPath}`);
                } catch (cleanupError) {
                    const cleanupErrorMessage =
                        cleanupError instanceof Error ? cleanupError.message : String(cleanupError);
                    this.logger.error(
                        `Failed to cleanup temporary git repository: ${cleanupErrorMessage}`
                    );
                }
            }

            return {
                success: false,
                error: errorMessage,
                cleanupRequired: gitRepoInitialized,
                metadata: {
                    flashPath: config.flashPath,
                    gitRepoInitialized,
                    cleanupAttempted: gitRepoInitialized,
                },
            };
        }
    }

    async validate(
        config: FlashSourceConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        const legacyConfig: FlashPreprocessConfigInput = {
            flashPath: config.flashPath,
            includeGitHistory: config.includeGitHistory,
            additionalPaths: config.additionalPaths,
        };

        const validationResult = await this.flashValidationService.validateFlashConfig(legacyConfig);

        return {
            valid: validationResult.isValid,
            error: validationResult.errors.length > 0 ? validationResult.errors.join(', ') : undefined,
            warnings: validationResult.warnings,
        };
    }

    async cleanup(result: BackupSourceResult): Promise<void> {
        if (result.cleanupRequired && result.metadata?.tempGitPath) {
            await this.cleanupTempGitRepo(result.metadata.tempGitPath as string);
        }
    }

    private async initializeGitRepository(flashPath: string, tempGitPath: string): Promise<boolean> {
        try {
            const existingGitPath = join(flashPath, '.git');
            const hasExistingRepo = await this.flashValidationService.validateGitRepository(flashPath);

            if (hasExistingRepo) {
                await execa('cp', ['-r', existingGitPath, tempGitPath]);
                this.logger.log('Copied existing git repository to temporary location');
                return true;
            }

            // No repository yet: initialize one in the flash path itself so the
            // `git add` / `git commit` calls below operate on the flash contents.
            await execa('git', ['init'], { cwd: flashPath });

            const gitignorePath = join(flashPath, '.gitignore');
            const gitignoreContent = [
                '# Exclude sensitive files',
                '*.key',
                '*.pem',
                '*.p12',
                '*.pfx',
                'config/passwd',
                'config/shadow',
                'config/ssh/',
                'config/ssl/',
                'config/wireguard/',
                'config/network.cfg',
                'config/ident.cfg',
            ].join('\n');

            await writeFile(gitignorePath, gitignoreContent);

            await execa('git', ['add', '.'], { cwd: flashPath });
            await execa(
                'git',
                [
                    '-c',
                    'user.name=Unraid Backup',
                    '-c',
                    'user.email=backup@unraid.net',
                    'commit',
                    '-m',
                    'Flash backup snapshot',
                ],
                { cwd: flashPath }
            );

            // Rename the fresh .git directory to the temporary path so the live
            // flash tree is left without an embedded repository.
            await execa('mv', [join(flashPath, '.git'), tempGitPath]);

            this.logger.log('Initialized new git repository for Flash backup');
            return true;
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.warn(`Failed to initialize git repository: ${errorMessage}`);
            return false;
        }
    }

    private async cleanupTempGitRepo(tempGitPath: string): Promise<void> {
        try {
            await execa('rm', ['-rf', tempGitPath]);
            this.logger.log(`Cleaned up temporary git repository: ${tempGitPath}`);
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to cleanup temporary git repository: ${errorMessage}`);
        }
    }

    private generateStreamCommand(
        config: FlashSourceConfig,
        gitRepoInitialized: boolean,
        tempGitPath?: string
    ): { command: string; args: string[] } {
        const excludeArgs: string[] = [];

        // Standard exclusions for flash backups
        const standardExcludes = ['lost+found', '*.tmp', '*.temp', '.DS_Store', 'Thumbs.db'];

        standardExcludes.forEach((pattern) => {
            excludeArgs.push('--exclude', pattern);
        });

        // Exclude the temporary git directory created for this backup from the tar stream
        if (gitRepoInitialized && tempGitPath) {
            excludeArgs.push('--exclude', '.git-backup-temp');
        }

        const tarArgs = [
            '-czf', // create, gzip, file
            '-', // output to stdout for streaming
            '-C', // change to directory
            config.flashPath,
            ...excludeArgs,
            '.', // backup everything in the directory
        ];

        // Add additional paths if specified
        if (config.additionalPaths?.length) {
            config.additionalPaths.forEach((path) => {
                tarArgs.push('-C', path, '.');
            });
        }

        return {
            command: 'tar',
            args: tarArgs,
        };
    }
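
    // Illustrative only: for flashPath '/boot' with no additional paths, the method
    // above yields roughly
    //   tar -czf - -C /boot --exclude lost+found --exclude '*.tmp' --exclude '*.temp'
    //       --exclude .DS_Store --exclude Thumbs.db .
    // with the archive read from tar's stdout instead of being written to disk.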

    get supportsStreaming(): boolean {
        return true;
    }

    get getReadableStream(): (config: FlashSourceConfig) => Promise<Readable> {
        return async (config: FlashSourceConfig): Promise<Readable> => {
            const validation = await this.validate(config);
            if (!validation.valid) {
                const errorMsg = `Flash configuration validation failed: ${validation.error}`;
                this.logger.error(errorMsg);
                const errorStream = new Readable({
                    read() {
                        this.emit('error', new Error(errorMsg));
                        this.push(null);
                    },
                });
                return errorStream;
            }

            const { command, args } = this.generateStreamCommand(config, false);

            this.logger.log(
                `[getReadableStream] Streaming flash backup with command: ${command} ${args.join(' ')}`
            );

            try {
                const tarProcess = execa(command, args, {
                    cwd: config.flashPath,
                });

                tarProcess.catch((error) => {
                    this.logger.error(
                        `Error executing tar command for streaming: ${error.message}`,
                        error.stack
                    );
                });

                if (!tarProcess.stdout) {
                    throw new Error('Failed to get stdout stream from tar process.');
                }

                tarProcess.stdout.on('end', () => {
                    this.logger.log('[getReadableStream] Tar process stdout stream ended.');
                });
                tarProcess.stdout.on('error', (err) => {
                    this.logger.error(
                        `[getReadableStream] Tar process stdout stream error: ${err.message}`
                    );
                });

                return tarProcess.stdout;
            } catch (error) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                this.logger.error(`[getReadableStream] Failed to start tar process: ${errorMessage}`);
                const errorStream = new Readable({
                    read() {
                        this.emit('error', new Error(errorMessage));
                        this.push(null);
                    },
                });
                return errorStream;
            }
        };
    }
}
@@ -0,0 +1,43 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { IsBoolean, IsNotEmpty, IsOptional, IsString } from 'class-validator';

import {
    BaseSourceConfig,
    BaseSourceConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js';

@InputType()
export class FlashPreprocessConfigInput extends BaseSourceConfigInput {
    @Field(() => String, { description: 'Flash drive mount path', defaultValue: '/boot' })
    @IsString()
    @IsNotEmpty()
    flashPath!: string;

    @Field(() => Boolean, { description: 'Whether to include git history', defaultValue: true })
    @IsBoolean()
    includeGitHistory!: boolean;

    @Field(() => [String], { description: 'Additional paths to include in backup', nullable: true })
    @IsOptional()
    additionalPaths?: string[];
}

@ObjectType()
export class FlashPreprocessConfig implements BaseSourceConfig {
    @Field(() => String, { nullable: false })
    label: string = 'Flash drive backup';

    @Field(() => String)
    flashPath!: string;

    @Field(() => Boolean)
    includeGitHistory!: boolean;

    @Field(() => [String], { nullable: true })
    additionalPaths?: string[];

    static isTypeOf(obj: any): obj is FlashPreprocessConfig {
        return obj && typeof obj.flashPath === 'string' && typeof obj.includeGitHistory === 'boolean';
    }
}
@@ -0,0 +1,260 @@
import { Injectable, Logger } from '@nestjs/common';
import { access, stat } from 'fs/promises';
import { join } from 'path';

import { execa } from 'execa';

import { FlashPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';

export interface FlashValidationResult {
    isValid: boolean;
    errors: string[];
    warnings: string[];
    metadata: {
        flashPathExists?: boolean;
        flashPathMounted?: boolean;
        gitRepoExists?: boolean;
        gitRepoSize?: number | null;
        additionalPathsValid?: boolean[];
        totalSize?: number | null;
        availableSpace?: number | null;
    };
}

@Injectable()
export class FlashValidationService {
    private readonly logger = new Logger(FlashValidationService.name);

    async validateFlashConfig(config: FlashPreprocessConfigInput): Promise<FlashValidationResult> {
        const result: FlashValidationResult = {
            isValid: true,
            errors: [],
            warnings: [],
            metadata: {},
        };

        try {
            // Validate flash path exists and is accessible
            const flashPathValid = await this.validateFlashPath(config.flashPath);
            result.metadata.flashPathExists = flashPathValid;

            if (!flashPathValid) {
                result.errors.push(
                    `Flash path '${config.flashPath}' does not exist or is not accessible`
                );
                result.isValid = false;
                return result;
            }

            // Check if flash path is mounted
            const isMounted = await this.isFlashMounted(config.flashPath);
            result.metadata.flashPathMounted = isMounted;

            if (!isMounted) {
                result.warnings.push(`Flash path '${config.flashPath}' may not be properly mounted`);
            }

            // Validate git repository if includeGitHistory is enabled
            if (config.includeGitHistory) {
                const gitRepoExists = await this.validateGitRepository(config.flashPath);
                result.metadata.gitRepoExists = gitRepoExists;

                if (!gitRepoExists) {
                    result.warnings.push(
                        `Git repository not found in '${config.flashPath}'. Git history will be skipped.`
                    );
                } else {
                    const gitRepoSize = await this.getGitRepositorySize(config.flashPath);
                    result.metadata.gitRepoSize = gitRepoSize;

                    if (gitRepoSize && gitRepoSize > 100 * 1024 * 1024) {
                        // 100MB
                        result.warnings.push(
                            `Git repository is large (${Math.round(gitRepoSize / 1024 / 1024)}MB). Backup may take longer.`
                        );
                    }
                }
            }

            // Validate additional paths
            if (config.additionalPaths && config.additionalPaths.length > 0) {
                const pathValidations = await Promise.all(
                    config.additionalPaths.map((path) => this.validateAdditionalPath(path))
                );
                result.metadata.additionalPathsValid = pathValidations;

                const invalidPaths = config.additionalPaths.filter(
                    (_, index) => !pathValidations[index]
                );
                if (invalidPaths.length > 0) {
                    result.warnings.push(
                        `Some additional paths are not accessible: ${invalidPaths.join(', ')}`
                    );
                }
            }

            // Calculate total backup size
            const totalSize = await this.calculateTotalBackupSize(config);
            result.metadata.totalSize = totalSize;

            // Check available space
            const availableSpace = await this.getAvailableSpace(config.flashPath);
            result.metadata.availableSpace = availableSpace;

            if (totalSize && availableSpace && totalSize > availableSpace * 0.8) {
                result.warnings.push(
                    'Backup size may be close to available space. Monitor disk usage during backup.'
                );
            }
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            result.errors.push(`Validation failed: ${errorMessage}`);
            result.isValid = false;
        }

        return result;
    }

    async validateFlashPath(flashPath: string): Promise<boolean> {
        try {
            await access(flashPath);
            const stats = await stat(flashPath);
            return stats.isDirectory();
        } catch {
            return false;
        }
    }

    async isFlashMounted(flashPath: string): Promise<boolean> {
        try {
            // Check if the path is a mount point by comparing device IDs
            const pathStat = await stat(flashPath);
            const parentStat = await stat(join(flashPath, '..'));
            return pathStat.dev !== parentStat.dev;
        } catch {
            return false;
        }
    }
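
    // Illustrative only: when the flash drive is mounted at /boot, stat('/boot').dev
    // differs from stat('/boot/..').dev, so the check above returns true; if /boot is
    // merely an empty directory on the root filesystem, the device IDs match.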

    async validateGitRepository(flashPath: string): Promise<boolean> {
        const gitPath = join(flashPath, '.git');
        try {
            await access(gitPath);
            const stats = await stat(gitPath);
            return stats.isDirectory();
        } catch {
            return false;
        }
    }

    async getGitRepositorySize(flashPath: string): Promise<number | null> {
        const gitPath = join(flashPath, '.git');
        try {
            const { stdout } = await execa('du', ['-sb', gitPath]);
            const size = parseInt(stdout.split('\t')[0], 10);
            return isNaN(size) ? null : size;
        } catch {
            return null;
        }
    }

    async validateAdditionalPath(path: string): Promise<boolean> {
        try {
            await access(path);
            return true;
        } catch {
            return false;
        }
    }

    async calculateTotalBackupSize(config: FlashPreprocessConfigInput): Promise<number | null> {
        try {
            let totalSize = 0;

            // Get flash directory size
            const { stdout: flashSize } = await execa('du', ['-sb', config.flashPath]);
            totalSize += parseInt(flashSize.split('\t')[0], 10) || 0;

            // Add additional paths if specified
            if (config.additionalPaths) {
                for (const path of config.additionalPaths) {
                    try {
                        const { stdout: pathSize } = await execa('du', ['-sb', path]);
                        totalSize += parseInt(pathSize.split('\t')[0], 10) || 0;
                    } catch (error: unknown) {
                        this.logger.warn(
                            `Failed to get size for additional path ${path}: ${error instanceof Error ? error.message : String(error)}`
                        );
                    }
                }
            }

            return totalSize;
        } catch {
            return null;
        }
    }

    async getAvailableSpace(path: string): Promise<number | null> {
        try {
            const { stdout } = await execa('df', ['-B1', path]);
            const lines = stdout.split('\n');
            if (lines.length > 1) {
                const fields = lines[1].split(/\s+/);
                if (fields.length >= 4) {
                    const available = parseInt(fields[3], 10);
                    return isNaN(available) ? null : available;
                }
            }
            return null;
        } catch {
            return null;
        }
    }
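
    // Illustrative only: `df -B1 <path>` prints a header row plus one data row, e.g.
    //   Filesystem      1B-blocks       Used  Available Use% Mounted on
    //   /dev/sda1      1990197248  512034816 1478162432  26% /boot
    // fields[3] of the data row is the Available column parsed above (device name
    // and sizes hypothetical).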

    async checkGitStatus(flashPath: string): Promise<{
        hasUncommittedChanges: boolean;
        currentBranch: string | null;
        lastCommitHash: string | null;
    }> {
        const result = {
            hasUncommittedChanges: false,
            currentBranch: null as string | null,
            lastCommitHash: null as string | null,
        };

        try {
            // Check for uncommitted changes
            const { stdout: statusOutput } = await execa('git', ['status', '--porcelain'], {
                cwd: flashPath,
            });
            result.hasUncommittedChanges = statusOutput.trim().length > 0;

            // Get current branch
            try {
                const { stdout: branchOutput } = await execa(
                    'git',
                    ['rev-parse', '--abbrev-ref', 'HEAD'],
                    { cwd: flashPath }
                );
                result.currentBranch = branchOutput.trim();
            } catch {
                // Ignore branch detection errors
            }

            // Get last commit hash
            try {
                const { stdout: commitOutput } = await execa('git', ['rev-parse', 'HEAD'], {
                    cwd: flashPath,
                });
                result.lastCommitHash = commitOutput.trim();
            } catch {
                // Ignore commit hash detection errors
            }
        } catch {
            // Git commands failed, repository might not be initialized
        }

        return result;
    }
}
|
||||
@@ -0,0 +1,144 @@
import { Injectable, Logger } from '@nestjs/common';
import { access, constants, stat } from 'fs/promises';
import { join } from 'path';

import {
    BackupSourceConfig,
    BackupSourceProcessor,
    BackupSourceProcessorOptions,
    BackupSourceResult,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { RawBackupConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.js';

export interface RawSourceConfig extends BackupSourceConfig {
    sourcePath: string;
    excludePatterns?: string[];
    includePatterns?: string[];
}

@Injectable()
export class RawSourceProcessor extends BackupSourceProcessor<RawSourceConfig> {
    readonly sourceType = SourceType.RAW;
    private readonly logger = new Logger(RawSourceProcessor.name);

    get supportsStreaming(): boolean {
        return false;
    }

    async execute(
        config: RawSourceConfig,
        options?: BackupSourceProcessorOptions
    ): Promise<BackupSourceResult> {
        const startTime = Date.now();

        try {
            this.logger.log(`Starting RAW backup validation for path: ${config.sourcePath}`);

            const validation = await this.validate(config);
            if (!validation.valid) {
                return {
                    success: false,
                    error: validation.error || 'Validation failed',
                    metadata: {
                        validationError: validation.error,
                        supportsStreaming: this.supportsStreaming,
                    },
                    supportsStreaming: this.supportsStreaming,
                };
            }

            if (validation.warnings?.length) {
                this.logger.warn(
                    `RAW backup warnings for ${config.sourcePath}: ${validation.warnings.join(', ')}`
                );
            }

            const sourceStats = await stat(config.sourcePath);
            const duration = Date.now() - startTime;

            this.logger.log(`RAW backup: Providing direct path for ${config.sourcePath}`);
            return {
                success: true,
                outputPath: config.sourcePath,
                supportsStreaming: this.supportsStreaming,
                isStreamingMode: false,
                metadata: {
                    sourcePath: config.sourcePath,
                    isDirectory: sourceStats.isDirectory(),
                    size: sourceStats.size,
                    duration,
                    excludePatterns: config.excludePatterns,
                    includePatterns: config.includePatterns,
                    validationWarnings: validation.warnings,
                    supportsStreaming: this.supportsStreaming,
                },
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            const errorStack = error instanceof Error ? error.stack : undefined;
            this.logger.error(
                `RAW backup preparation failed for ${config.sourcePath}: ${errorMessage}`,
                errorStack
            );

            return {
                success: false,
                error: errorMessage,
                supportsStreaming: this.supportsStreaming,
                metadata: {
                    sourcePath: config.sourcePath,
                    duration: Date.now() - startTime,
                    supportsStreaming: this.supportsStreaming,
                },
            };
        }
    }

    async validate(
        config: RawSourceConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        const warnings: string[] = [];

        try {
            await access(config.sourcePath, constants.F_OK | constants.R_OK);
        } catch {
            return {
                valid: false,
                error: `Source path does not exist or is not readable: ${config.sourcePath}`,
            };
        }

        const restrictedPaths = ['/proc', '/sys', '/dev'];
        const isRestricted = restrictedPaths.some((path) => config.sourcePath.startsWith(path));
        if (isRestricted) {
            return {
                valid: false,
                error: `Cannot backup restricted system paths: ${config.sourcePath}`,
            };
        }

        if (config.excludePatterns?.length && config.includePatterns?.length) {
            warnings.push(
                'Both include and exclude patterns specified - exclude patterns take precedence'
            );
        }

        const stats = await stat(config.sourcePath);
        if (stats.isDirectory()) {
            const largeDirPaths = ['/mnt/user', '/mnt/disk'];
            const isLargeDir = largeDirPaths.some((path) => config.sourcePath.startsWith(path));
            if (isLargeDir && !config.excludePatterns?.length && !config.includePatterns?.length) {
                warnings.push(
                    'Backing up large directory without filters may take significant time and space'
                );
            }
        }

        return { valid: true, warnings };
    }

    async cleanup(result: BackupSourceResult): Promise<void> {
        this.logger.log(`RAW backup cleanup completed for: ${result.metadata?.sourcePath}`);
    }
}
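A short sketch of what a successful RAW run yields (the config values and `processor` handle are invented for illustration; field names follow RawSourceConfig above). Note the processor copies nothing itself: on success, outputPath is simply the validated source path, and isStreamingMode is false because supportsStreaming is false for RAW sources.

    const result = await processor.execute({
        sourcePath: '/mnt/user/appdata/myapp', // hypothetical
        excludePatterns: ['*.tmp'],
    } as RawSourceConfig);
    console.log(result.success, result.outputPath, result.metadata?.isDirectory);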
@@ -0,0 +1,45 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { IsArray, IsNotEmpty, IsOptional, IsString } from 'class-validator';

import {
    BaseSourceConfig,
    BaseSourceConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js';

@InputType()
export class RawBackupConfigInput extends BaseSourceConfigInput {
    @Field(() => String, { description: 'Source path to backup' })
    @IsString()
    @IsNotEmpty()
    sourcePath!: string;

    @Field(() => [String], { description: 'File patterns to exclude from backup', nullable: true })
    @IsOptional()
    @IsArray()
    excludePatterns?: string[];

    @Field(() => [String], { description: 'File patterns to include in backup', nullable: true })
    @IsOptional()
    @IsArray()
    includePatterns?: string[];
}

@ObjectType()
export class RawBackupConfig implements BaseSourceConfig {
    @Field(() => String, { nullable: false })
    label: string = 'Raw file backup';

    @Field(() => String)
    sourcePath!: string;

    @Field(() => [String], { nullable: true })
    excludePatterns?: string[];

    @Field(() => [String], { nullable: true })
    includePatterns?: string[];

    static isTypeOf(obj: any): obj is RawBackupConfig {
        return obj && typeof obj.sourcePath === 'string';
    }
}
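Since the input type carries class-validator decorators, it can also be checked imperatively outside the GraphQL pipeline; a minimal sketch assuming class-transformer is available (sample values invented):

    import { plainToInstance } from 'class-transformer';
    import { validate } from 'class-validator';

    const input = plainToInstance(RawBackupConfigInput, {
        sourcePath: '', // violates @IsNotEmpty()
        excludePatterns: ['*.log'],
    });
    const errors = await validate(input);
    // errors[0].constraints reports the isNotEmpty violation on sourcePath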
@@ -0,0 +1,252 @@
import { Injectable, Logger } from '@nestjs/common';
import { promises as fs } from 'fs';
import { dirname } from 'path';

import { execa } from 'execa';

import {
    BackupSourceConfig,
    BackupSourceProcessor,
    BackupSourceProcessorOptions,
    BackupSourceResult,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';

export interface ScriptSourceConfig extends BackupSourceConfig {
    scriptPath: string;
    scriptArgs?: string[];
    workingDirectory?: string;
    environment?: Record<string, string>;
    outputPath: string;
}

@Injectable()
export class ScriptSourceProcessor extends BackupSourceProcessor<ScriptSourceConfig> {
    readonly sourceType = SourceType.SCRIPT;
    private readonly logger = new Logger(ScriptSourceProcessor.name);
    private readonly tempDir = '/tmp/unraid-script-preprocessing';
    private readonly maxOutputSize = 100 * 1024 * 1024; // 100MB limit

    get supportsStreaming(): boolean {
        return false;
    }

    async execute(
        config: ScriptSourceConfig,
        options?: BackupSourceProcessorOptions
    ): Promise<BackupSourceResult> {
        const startTime = Date.now();

        const validation = await this.validate(config);
        if (!validation.valid) {
            return {
                success: false,
                error: `Script configuration validation failed: ${validation.error}`,
                metadata: { validationError: validation.error, validationWarnings: validation.warnings },
            };
        }

        if (validation.warnings?.length) {
            this.logger.warn(`Script backup warnings: ${validation.warnings.join(', ')}`);
        }

        try {
            await this.ensureTempDirectory();

            const { command, args } = this.buildCommand(config);

            this.logger.log(`Executing script: ${command} ${args.join(' ')}`);

            await this.runScriptWithTimeout(command, args, config.timeout / 1000);

            const outputSize = await this.getFileSize(config.outputPath);
            if (outputSize === 0) {
                throw new Error('Script produced no output');
            }

            if (outputSize > this.maxOutputSize) {
                throw new Error(
                    `Script output too large: ${outputSize} bytes (max: ${this.maxOutputSize})`
                );
            }

            const duration = Date.now() - startTime;
            this.logger.log(
                `Script completed successfully in ${duration}ms, output size: ${outputSize} bytes`
            );

            return {
                success: true,
                outputPath: config.outputPath,
                metadata: {
                    scriptPath: config.scriptPath,
                    duration,
                    outputSize,
                    workingDirectory: config.workingDirectory,
                    scriptArgs: config.scriptArgs,
                    validationWarnings: validation.warnings,
                },
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Script backup failed: ${errorMessage}`);

            try {
                await fs.unlink(config.outputPath);
            } catch {
                // Ignore cleanup errors
            }

            return {
                success: false,
                error: errorMessage,
                metadata: {
                    scriptPath: config.scriptPath,
                    duration: Date.now() - startTime,
                    workingDirectory: config.workingDirectory,
                    scriptArgs: config.scriptArgs,
                },
            };
        }
    }

    async validate(
        config: ScriptSourceConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        try {
            await fs.access(config.scriptPath, fs.constants.F_OK | fs.constants.X_OK);

            const restrictedPaths = ['/boot', '/mnt/user', '/mnt/disk'];
            const isRestricted = restrictedPaths.some((path) => config.scriptPath.startsWith(path));

            if (isRestricted) {
                return {
                    valid: false,
                    error: 'Script cannot be located in restricted paths (/boot, /mnt/user, /mnt/disk*)',
                };
            }

            if (config.workingDirectory) {
                try {
                    await fs.access(config.workingDirectory, fs.constants.F_OK);
                } catch {
                    return {
                        valid: false,
                        error: `Working directory does not exist: ${config.workingDirectory}`,
                    };
                }
            }

            const outputDir = dirname(config.outputPath);
            try {
                await fs.access(outputDir, fs.constants.F_OK | fs.constants.W_OK);
            } catch {
                return {
                    valid: false,
                    error: `Output directory does not exist or is not writable: ${outputDir}`,
                };
            }

            if (config.scriptArgs) {
                for (const arg of config.scriptArgs) {
                    if (arg.length > 1000) {
                        return {
                            valid: false,
                            error: `Script argument too long (max 1000 characters): ${arg.substring(0, 50)}...`,
                        };
                    }
                }
            }

            return { valid: true };
        } catch {
            return {
                valid: false,
                error: `Script does not exist or is not executable: ${config.scriptPath}`,
            };
        }
    }

    async cleanup(result: BackupSourceResult): Promise<void> {
        if (result.outputPath) {
            await this.cleanupFile(result.outputPath);
        }
    }

    private async ensureTempDirectory(): Promise<void> {
        try {
            await fs.access(this.tempDir);
        } catch {
            await fs.mkdir(this.tempDir, { recursive: true, mode: 0o700 });
        }
    }

    private buildCommand(config: ScriptSourceConfig): { command: string; args: string[] } {
        const command = 'timeout';
        const args = [
            `${config.timeout / 1000}s`,
            'nice',
            '-n',
            '10',
            'ionice',
            '-c',
            '3',
            'bash',
            '-c',
            `cd "${config.workingDirectory || '/tmp'}" && exec "${config.scriptPath}" ${(config.scriptArgs || []).join(' ')}`,
        ];

        return { command, args };
    }

    private async runScriptWithTimeout(
        command: string,
        args: string[],
        timeoutSeconds: number
    ): Promise<void> {
        try {
            await execa(command, args, {
                timeout: timeoutSeconds * 1000,
                stdio: ['ignore', 'pipe', 'pipe'],
                env: {
                    ...process.env,
                    PATH: '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
                },
                uid: 99, // nobody user
                gid: 99, // nobody group
            });
        } catch (error: any) {
            if (error.timedOut) {
                throw new Error(`Script timeout after ${timeoutSeconds} seconds`);
            }
            if (error.signal) {
                throw new Error(`Script killed by signal: ${error.signal}`);
            }
            if (error.exitCode !== undefined && error.exitCode !== 0) {
                throw new Error(
                    `Script exited with code ${error.exitCode}. stderr: ${error.stderr || ''}`
                );
            }
            throw new Error(`Failed to execute script: ${error.message}`);
        }
    }

    private async getFileSize(filePath: string): Promise<number> {
        try {
            const stats = await fs.stat(filePath);
            return stats.size;
        } catch {
            return 0;
        }
    }

    private async cleanupFile(filePath: string): Promise<void> {
        try {
            await fs.unlink(filePath);
            this.logger.log(`Cleaned up script output file: ${filePath}`);
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`Failed to cleanup script output ${filePath}: ${errorMessage}`);
        }
    }
}
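To make the resource-limiting wrapper in buildCommand concrete, here is the argv it produces for a hypothetical config (values invented; bracket access is used only to reach the private method in a sketch):

    const { command, args } = processor['buildCommand']({
        scriptPath: '/usr/local/bin/dump.sh', // hypothetical
        scriptArgs: ['--full'],
        workingDirectory: '/tmp/work',
        outputPath: '/tmp/work/dump.tar',
        timeout: 60_000, // ms
    } as ScriptSourceConfig);
    // command === 'timeout'
    // args === ['60s', 'nice', '-n', '10', 'ionice', '-c', '3', 'bash', '-c',
    //   'cd "/tmp/work" && exec "/usr/local/bin/dump.sh" --full']

The script thus runs under a hard wall-clock cap (timeout), lowered CPU priority (nice -n 10), and idle-class disk I/O (ionice -c 3). One caveat worth noting: scriptArgs are joined with spaces inside the bash -c string, so arguments containing whitespace would need quoting by the caller.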
@@ -0,0 +1,63 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { IsNotEmpty, IsOptional, IsString } from 'class-validator';
import { GraphQLJSON } from 'graphql-scalars';

import {
    BaseSourceConfig,
    BaseSourceConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js';

@InputType()
export class ScriptPreprocessConfigInput extends BaseSourceConfigInput {
    @Field(() => String, { description: 'Path to the script file' })
    @IsString()
    @IsNotEmpty()
    scriptPath!: string;

    @Field(() => [String], { description: 'Arguments to pass to the script', nullable: true })
    @IsOptional()
    scriptArgs?: string[];

    @Field(() => String, { description: 'Working directory for script execution', nullable: true })
    @IsOptional()
    @IsString()
    workingDirectory?: string;

    @Field(() => GraphQLJSON, {
        description: 'Environment variables for script execution',
        nullable: true,
    })
    @IsOptional()
    environment?: Record<string, string>;

    @Field(() => String, { description: 'Output file path where script should write data' })
    @IsString()
    @IsNotEmpty()
    outputPath!: string;
}

@ObjectType()
export class ScriptPreprocessConfig implements BaseSourceConfig {
    @Field(() => String, { nullable: false })
    label: string = 'Script backup';

    @Field(() => String)
    scriptPath!: string;

    @Field(() => [String], { nullable: true })
    scriptArgs?: string[];

    @Field(() => String, { nullable: true })
    workingDirectory?: string;

    @Field(() => GraphQLJSON, { nullable: true })
    environment?: Record<string, string>;

    @Field(() => String)
    outputPath!: string;

    static isTypeOf(obj: any): obj is ScriptPreprocessConfig {
        return obj && typeof obj.scriptPath === 'string' && typeof obj.outputPath === 'string';
    }
}
@@ -0,0 +1,285 @@
import { Injectable, Logger } from '@nestjs/common';
import { access, constants, stat } from 'fs/promises';
import { dirname, isAbsolute, resolve } from 'path';

import { ScriptPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/script/script-source.types.js';

export interface ScriptValidationResult {
    isValid: boolean;
    errors: string[];
    warnings: string[];
    metadata: {
        scriptExists?: boolean;
        scriptExecutable?: boolean;
        workingDirectoryExists?: boolean;
        outputDirectoryExists?: boolean;
        outputDirectoryWritable?: boolean;
        environmentVariablesValid?: boolean;
        resolvedScriptPath?: string;
        resolvedWorkingDirectory?: string;
        resolvedOutputPath?: string;
    };
}

@Injectable()
export class ScriptValidationService {
    private readonly logger = new Logger(ScriptValidationService.name);

    async validateScriptConfig(config: ScriptPreprocessConfigInput): Promise<ScriptValidationResult> {
        const result: ScriptValidationResult = {
            isValid: true,
            errors: [],
            warnings: [],
            metadata: {},
        };

        try {
            // Resolve and validate script path
            const resolvedScriptPath = this.resolveScriptPath(
                config.scriptPath,
                config.workingDirectory
            );
            result.metadata.resolvedScriptPath = resolvedScriptPath;

            const scriptExists = await this.validateScriptExists(resolvedScriptPath);
            result.metadata.scriptExists = scriptExists;

            if (!scriptExists) {
                result.errors.push(`Script file '${resolvedScriptPath}' does not exist`);
                result.isValid = false;
                return result;
            }

            // Check if script is executable
            const scriptExecutable = await this.validateScriptExecutable(resolvedScriptPath);
            result.metadata.scriptExecutable = scriptExecutable;

            if (!scriptExecutable) {
                result.warnings.push(`Script file '${resolvedScriptPath}' may not be executable`);
            }

            // Validate working directory
            if (config.workingDirectory) {
                const resolvedWorkingDir = resolve(config.workingDirectory);
                result.metadata.resolvedWorkingDirectory = resolvedWorkingDir;

                const workingDirExists = await this.validateDirectory(resolvedWorkingDir);
                result.metadata.workingDirectoryExists = workingDirExists;

                if (!workingDirExists) {
                    result.errors.push(`Working directory '${resolvedWorkingDir}' does not exist`);
                    result.isValid = false;
                }
            }

            // Validate output path and directory
            const resolvedOutputPath = this.resolveOutputPath(
                config.outputPath,
                config.workingDirectory
            );
            result.metadata.resolvedOutputPath = resolvedOutputPath;

            const outputDirectory = dirname(resolvedOutputPath);
            const outputDirExists = await this.validateDirectory(outputDirectory);
            result.metadata.outputDirectoryExists = outputDirExists;

            if (!outputDirExists) {
                result.errors.push(`Output directory '${outputDirectory}' does not exist`);
                result.isValid = false;
            } else {
                // Check if output directory is writable
                const outputDirWritable = await this.validateDirectoryWritable(outputDirectory);
                result.metadata.outputDirectoryWritable = outputDirWritable;

                if (!outputDirWritable) {
                    result.errors.push(`Output directory '${outputDirectory}' is not writable`);
                    result.isValid = false;
                }
            }

            // Validate environment variables
            if (config.environment) {
                const envValid = this.validateEnvironmentVariables(config.environment);
                result.metadata.environmentVariablesValid = envValid;

                if (!envValid) {
                    result.warnings.push('Some environment variables may contain invalid values');
                }
            }

            // Security validations
            this.performSecurityValidations(config, result);
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            result.errors.push(`Validation failed: ${errorMessage}`);
            result.isValid = false;
        }

        return result;
    }

    private resolveScriptPath(scriptPath: string, workingDirectory?: string): string {
        if (isAbsolute(scriptPath)) {
            return scriptPath;
        }

        const baseDir = workingDirectory || process.cwd();
        return resolve(baseDir, scriptPath);
    }

    private resolveOutputPath(outputPath: string, workingDirectory?: string): string {
        if (isAbsolute(outputPath)) {
            return outputPath;
        }

        const baseDir = workingDirectory || process.cwd();
        return resolve(baseDir, outputPath);
    }

    async validateScriptExists(scriptPath: string): Promise<boolean> {
        try {
            await access(scriptPath);
            const stats = await stat(scriptPath);
            return stats.isFile();
        } catch {
            return false;
        }
    }

    async validateScriptExecutable(scriptPath: string): Promise<boolean> {
        try {
            const stats = await stat(scriptPath);
            // Check if file has execute permissions (basic check)
            return (stats.mode & parseInt('111', 8)) !== 0;
        } catch {
            return false;
        }
    }

    async validateDirectory(dirPath: string): Promise<boolean> {
        try {
            await access(dirPath);
            const stats = await stat(dirPath);
            return stats.isDirectory();
        } catch {
            return false;
        }
    }

    async validateDirectoryWritable(dirPath: string): Promise<boolean> {
        try {
            const stats = await stat(dirPath);
            // Check if directory has write permissions (basic check)
            return (stats.mode & parseInt('200', 8)) !== 0;
        } catch {
            return false;
        }
    }

    validateEnvironmentVariables(environment: Record<string, string>): boolean {
        try {
            // Check for potentially dangerous environment variables
            const dangerousVars = ['PATH', 'LD_LIBRARY_PATH', 'HOME', 'USER'];
            const hasDangerousVars = Object.keys(environment).some((key) =>
                dangerousVars.includes(key.toUpperCase())
            );

            if (hasDangerousVars) {
                this.logger.warn('Script environment contains potentially dangerous variables');
            }

            // Check for valid variable names (basic validation)
            const validVarName = /^[A-Za-z_][A-Za-z0-9_]*$/;
            const invalidVars = Object.keys(environment).filter((key) => !validVarName.test(key));

            if (invalidVars.length > 0) {
                this.logger.warn(`Invalid environment variable names: ${invalidVars.join(', ')}`);
                return false;
            }

            return true;
        } catch {
            return false;
        }
    }

    private performSecurityValidations(
        config: ScriptPreprocessConfigInput,
        result: ScriptValidationResult
    ): void {
        // Check for potentially dangerous script paths
        const dangerousPaths = ['/bin', '/usr/bin', '/sbin', '/usr/sbin'];
        const scriptInDangerousPath = dangerousPaths.some((path) =>
            result.metadata.resolvedScriptPath?.startsWith(path)
        );

        if (scriptInDangerousPath) {
            result.warnings.push(
                'Script is located in a system directory. Ensure it is safe to execute.'
            );
        }

        // Check for dangerous script arguments
        if (config.scriptArgs) {
            const dangerousArgs = config.scriptArgs.filter(
                (arg) =>
                    arg.includes('..') ||
                    arg.includes('rm ') ||
                    arg.includes('sudo ') ||
                    arg.includes('su ')
            );

            if (dangerousArgs.length > 0) {
                result.warnings.push(
                    'Script arguments contain potentially dangerous commands or paths.'
                );
            }
        }

        // Check if output path is in a safe location
        if (result.metadata.resolvedOutputPath) {
            const systemPaths = ['/bin', '/usr', '/etc', '/var', '/sys', '/proc'];
            const outputInSystemPath = systemPaths.some((path) =>
                result.metadata.resolvedOutputPath?.startsWith(path)
            );

            if (outputInSystemPath) {
                result.errors.push('Output path cannot be in system directories for security reasons.');
                result.isValid = false;
            }
        }

        // Validate script file extension for common script types
        if (result.metadata.resolvedScriptPath) {
            const scriptExt = result.metadata.resolvedScriptPath.split('.').pop()?.toLowerCase();
            const allowedExtensions = ['sh', 'bash', 'py', 'pl', 'rb', 'js', 'php'];

            if (scriptExt && !allowedExtensions.includes(scriptExt)) {
                result.warnings.push(
                    `Script extension '.${scriptExt}' is not commonly recognized. Ensure it is executable.`
                );
            }
        }
    }

    async getScriptInfo(scriptPath: string): Promise<{
        size: number | null;
        lastModified: Date | null;
        permissions: string | null;
    }> {
        try {
            const stats = await stat(scriptPath);
            return {
                size: stats.size,
                lastModified: stats.mtime,
                permissions: '0' + (stats.mode & parseInt('777', 8)).toString(8),
            };
        } catch {
            return {
                size: null,
                lastModified: null,
                permissions: null,
            };
        }
    }
}
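The environment-variable check reduces to a POSIX-style name pattern; the same rule in isolation (sample values invented):

    const validVarName = /^[A-Za-z_][A-Za-z0-9_]*$/;
    const env = { BACKUP_TARGET: '/mnt/backup', '2BAD': 'x', 'has-dash': 'y' };
    const invalid = Object.keys(env).filter((key) => !validVarName.test(key));
    console.log(invalid); // ['2BAD', 'has-dash'] -> validateEnvironmentVariables returns false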
@@ -0,0 +1,139 @@
import { Injectable, Logger } from '@nestjs/common';

import { execa } from 'execa';

import {
    BackupSourceConfig,
    BackupSourceProcessor,
    BackupSourceProcessorOptions,
    BackupSourceResult,
} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
import { ZfsPreprocessConfig } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js';
import { ZfsValidationService } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.js';

export interface ZfsSourceConfig extends BackupSourceConfig {
    poolName: string;
    datasetName: string;
    snapshotPrefix?: string;
    cleanupSnapshots: boolean;
    retainSnapshots?: number;
}

@Injectable()
export class ZfsSourceProcessor extends BackupSourceProcessor<ZfsSourceConfig> {
    readonly sourceType = SourceType.ZFS;
    private readonly logger = new Logger(ZfsSourceProcessor.name);

    constructor(private readonly zfsValidationService: ZfsValidationService) {
        super();
    }

    get supportsStreaming(): boolean {
        return true;
    }

    async validate(
        config: ZfsSourceConfig
    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
        try {
            const result = await this.zfsValidationService.validateZfsConfig(config as any);
            return {
                valid: result.isValid,
                error: result.errors.length > 0 ? result.errors.join(', ') : undefined,
                warnings: result.warnings,
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            return { valid: false, error: errorMessage };
        }
    }

    async execute(
        config: ZfsSourceConfig,
        options?: BackupSourceProcessorOptions
    ): Promise<BackupSourceResult> {
        try {
            this.logger.log(`Starting ZFS backup for dataset: ${config.poolName}/${config.datasetName}`);

            const validation = await this.validate(config);
            if (!validation.valid) {
                return {
                    success: false,
                    error: validation.error || 'ZFS validation failed',
                    cleanupRequired: false,
                };
            }

            const snapshotName = await this.createSnapshot(config);
            const snapshotPath = `${config.poolName}/${config.datasetName}@${snapshotName}`;

            this.logger.log(`Created ZFS snapshot: ${snapshotPath}`);

            const result: BackupSourceResult = {
                success: true,
                outputPath: snapshotPath,
                snapshotName,
                cleanupRequired: config.cleanupSnapshots,
                metadata: {
                    poolName: config.poolName,
                    datasetName: config.datasetName,
                    snapshotPath,
                },
            };

            return result;
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            this.logger.error(`ZFS backup failed: ${errorMessage}`, error);

            return {
                success: false,
                error: errorMessage,
                cleanupRequired: false,
            };
        }
    }

    async cleanup(result: BackupSourceResult): Promise<void> {
        if (!result.cleanupRequired || !result.snapshotName) {
            return;
        }

        try {
            const snapshotPath = (result.metadata?.snapshotPath as string) || result.outputPath;
            if (snapshotPath && typeof snapshotPath === 'string') {
                await this.destroySnapshot(snapshotPath);
                this.logger.log(`Cleaned up ZFS snapshot: ${snapshotPath}`);
            }
        } catch (error) {
            this.logger.error(`Failed to cleanup ZFS snapshot: ${error}`);
        }
    }

    private async createSnapshot(config: ZfsSourceConfig): Promise<string> {
        const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
        const prefix = config.snapshotPrefix || 'backup';
        const snapshotName = `${prefix}-${timestamp}`;
        const snapshotPath = `${config.poolName}/${config.datasetName}@${snapshotName}`;

        const { stdout, stderr } = await execa('zfs', ['snapshot', snapshotPath]);

        if (stderr) {
            this.logger.warn(`ZFS snapshot creation warning: ${stderr}`);
        }

        this.logger.debug(`ZFS snapshot created: ${stdout}`);
        return snapshotName;
    }

    private async destroySnapshot(snapshotPath: string): Promise<void> {
        const { stdout, stderr } = await execa('zfs', ['destroy', snapshotPath]);

        if (stderr) {
            this.logger.warn(`ZFS snapshot destruction warning: ${stderr}`);
        }

        this.logger.debug(`ZFS snapshot destroyed: ${stdout}`);
    }
}
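The snapshot name is an ISO timestamp with ':' and '.' swapped for '-'; a quick sketch of what createSnapshot derives (fixed date and pool/dataset names invented for reproducibility):

    const timestamp = new Date('2025-01-02T14:40:01.123Z').toISOString().replace(/[:.]/g, '-');
    const snapshotPath = `tank/appdata@backup-${timestamp}`;
    console.log(snapshotPath); // tank/appdata@backup-2025-01-02T14-40-01-123Z

The replacement presumably keeps snapshot names shell- and URL-friendly; with zero-padded ISO components, lexicographic order of the names also matches chronological order, which is convenient for retention logic.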
@@ -0,0 +1,64 @@
import { Field, InputType, ObjectType } from '@nestjs/graphql';

import { IsBoolean, IsNotEmpty, IsNumber, IsOptional, IsString, Min } from 'class-validator';

import {
    BaseSourceConfig,
    BaseSourceConfigInput,
} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js';

@InputType()
export class ZfsPreprocessConfigInput extends BaseSourceConfigInput {
    @Field(() => String, { description: 'ZFS pool name' })
    @IsString()
    @IsNotEmpty()
    poolName!: string;

    @Field(() => String, { description: 'Dataset name within the pool' })
    @IsString()
    @IsNotEmpty()
    datasetName!: string;

    @Field(() => String, { description: 'Snapshot name prefix', nullable: true })
    @IsOptional()
    @IsString()
    snapshotPrefix?: string;

    @Field(() => Boolean, {
        description: 'Whether to cleanup snapshots after backup',
        defaultValue: true,
    })
    @IsBoolean()
    cleanupSnapshots!: boolean;

    @Field(() => Number, { description: 'Number of snapshots to retain', nullable: true })
    @IsOptional()
    @IsNumber()
    @Min(1)
    retainSnapshots?: number;
}

@ObjectType()
export class ZfsPreprocessConfig implements BaseSourceConfig {
    @Field(() => String, { nullable: false })
    label: string = 'ZFS backup';

    @Field(() => String)
    poolName!: string;

    @Field(() => String)
    datasetName!: string;

    @Field(() => String, { nullable: true })
    snapshotPrefix?: string;

    @Field(() => Boolean)
    cleanupSnapshots!: boolean;

    @Field(() => Number, { nullable: true })
    retainSnapshots?: number;

    static isTypeOf(obj: any): obj is ZfsPreprocessConfig {
        return obj && typeof obj.poolName === 'string' && typeof obj.datasetName === 'string';
    }
}
@@ -0,0 +1,245 @@
import { Injectable, Logger } from '@nestjs/common';
import { access, constants } from 'fs/promises';

import { execa } from 'execa';

import { ZfsPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js';

export interface ZfsValidationResult {
    isValid: boolean;
    errors: string[];
    warnings: string[];
    metadata: {
        poolExists?: boolean;
        datasetExists?: boolean;
        datasetSize?: number;
        availableSpace?: number;
        mountpoint?: string;
    };
}

@Injectable()
export class ZfsValidationService {
    private readonly logger = new Logger(ZfsValidationService.name);

    async validateZfsConfig(config: ZfsPreprocessConfigInput): Promise<ZfsValidationResult> {
        const result: ZfsValidationResult = {
            isValid: true,
            errors: [],
            warnings: [],
            metadata: {},
        };

        try {
            // Validate pool exists
            const poolExists = await this.validatePool(config.poolName);
            result.metadata.poolExists = poolExists;

            if (!poolExists) {
                result.errors.push(`ZFS pool '${config.poolName}' does not exist`);
                result.isValid = false;
                return result;
            }

            // Validate dataset exists
            const datasetExists = await this.validateDataset(config.poolName, config.datasetName);
            result.metadata.datasetExists = datasetExists;

            if (!datasetExists) {
                result.errors.push(
                    `ZFS dataset '${config.poolName}/${config.datasetName}' does not exist`
                );
                result.isValid = false;
                return result;
            }

            // Get dataset information
            const datasetInfo = await this.getDatasetInfo(config.poolName, config.datasetName);
            result.metadata = { ...result.metadata, ...datasetInfo };

            // Validate dataset is mounted
            if (!datasetInfo.mountpoint || datasetInfo.mountpoint === 'none') {
                result.warnings.push(
                    `Dataset '${config.poolName}/${config.datasetName}' is not mounted`
                );
            }

            // Check available space for snapshots
            if (datasetInfo.availableSpace && datasetInfo.datasetSize) {
                const spaceRatio = datasetInfo.availableSpace / datasetInfo.datasetSize;
                if (spaceRatio < 0.1) {
                    result.warnings.push(
                        'Low available space for snapshot creation (less than 10% of dataset size)'
                    );
                }
            }

            // Validate snapshot retention settings
            if (config.retainSnapshots && config.retainSnapshots < 1) {
                result.errors.push('Retain snapshots must be at least 1');
                result.isValid = false;
            }

            // Check for existing snapshots if cleanup is disabled
            if (!config.cleanupSnapshots) {
                const existingSnapshots = await this.getExistingSnapshots(
                    config.poolName,
                    config.datasetName,
                    config.snapshotPrefix
                );
                if (existingSnapshots.length > 10) {
                    result.warnings.push(
                        `Found ${existingSnapshots.length} existing snapshots. Consider enabling cleanup.`
                    );
                }
            }
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            result.errors.push(`Validation failed: ${errorMessage}`);
            result.isValid = false;
        }

        return result;
    }

    async validatePool(poolName: string): Promise<boolean> {
        try {
            await execa('zpool', ['list', '-H', '-o', 'name', poolName]);
            return true;
        } catch {
            return false;
        }
    }

    async validateDataset(poolName: string, datasetName: string): Promise<boolean> {
        const fullPath = `${poolName}/${datasetName}`;
        try {
            await execa('zfs', ['list', '-H', '-o', 'name', fullPath]);
            return true;
        } catch {
            return false;
        }
    }

    async getDatasetInfo(
        poolName: string,
        datasetName: string
    ): Promise<{
        datasetSize?: number;
        availableSpace?: number;
        mountpoint?: string;
    }> {
        const fullPath = `${poolName}/${datasetName}`;
        const result: { datasetSize?: number; availableSpace?: number; mountpoint?: string } = {};

        try {
            // Get dataset size
            const { stdout: sizeOutput } = await execa('zfs', [
                'list',
                '-H',
                '-p',
                '-o',
                'used',
                fullPath,
            ]);
            const size = parseInt(sizeOutput.trim(), 10);
            if (!isNaN(size)) {
                result.datasetSize = size;
            }
        } catch (error: unknown) {
            this.logger.warn(
                `Failed to get dataset size: ${error instanceof Error ? error.message : String(error)}`
            );
        }

        try {
            // Get available space
            const { stdout: availOutput } = await execa('zfs', [
                'list',
                '-H',
                '-p',
                '-o',
                'avail',
                fullPath,
            ]);
            const avail = parseInt(availOutput.trim(), 10);
            if (!isNaN(avail)) {
                result.availableSpace = avail;
            }
        } catch (error: unknown) {
            this.logger.warn(
                `Failed to get available space: ${error instanceof Error ? error.message : String(error)}`
            );
        }

        try {
            // Get mountpoint
            const { stdout: mountOutput } = await execa('zfs', [
                'list',
                '-H',
                '-o',
                'mountpoint',
                fullPath,
            ]);
            result.mountpoint = mountOutput.trim();
        } catch (error: unknown) {
            this.logger.warn(
                `Failed to get mountpoint: ${error instanceof Error ? error.message : String(error)}`
            );
        }

        return result;
    }

    async getExistingSnapshots(
        poolName: string,
        datasetName: string,
        prefix?: string
    ): Promise<string[]> {
        const fullPath = `${poolName}/${datasetName}`;

        try {
            const { stdout } = await execa('zfs', [
                'list',
                '-H',
                '-t',
                'snapshot',
                '-o',
                'name',
                '-r',
                fullPath,
            ]);
            const snapshots = stdout.split('\n').filter((line) => line.trim());

            if (prefix) {
                const prefixPattern = `${fullPath}@${prefix}`;
                return snapshots.filter((snapshot) => snapshot.startsWith(prefixPattern));
            }

            return snapshots.filter((snapshot) => snapshot.startsWith(`${fullPath}@`));
        } catch {
            return [];
        }
    }

    async getPoolHealth(poolName: string): Promise<string | null> {
        try {
            const { stdout } = await execa('zpool', ['list', '-H', '-o', 'health', poolName]);
            return stdout.trim();
        } catch {
            return null;
        }
    }

    async canCreateSnapshot(poolName: string, datasetName: string): Promise<boolean> {
        // Check if we have write permissions and the dataset is not readonly
        const fullPath = `${poolName}/${datasetName}`;

        try {
            const { stdout } = await execa('zfs', ['get', '-H', '-o', 'value', 'readonly', fullPath]);
            return stdout.trim() === 'off';
        } catch {
            return false;
        }
    }
}
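Note that the -p flag makes zfs list print raw byte counts, which is what lets the parseInt calls and the 10% space-ratio check above work without unit parsing; a condensed sketch of the same probe (dataset name invented):

    import { execa } from 'execa';

    const { stdout } = await execa('zfs', ['list', '-H', '-p', '-o', 'used,avail', 'tank/appdata']);
    const [used, avail] = stdout.trim().split(/\s+/).map(Number);
    if (avail / used < 0.1) {
        console.warn('Less than 10% headroom for snapshot creation');
    }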
@@ -19,6 +19,11 @@ export class DockerMutations {}
@ObjectType()
export class VmMutations {}

@ObjectType({
    description: 'Backup related mutations',
})
export class BackupMutations {}

@ObjectType({
    description: 'API Key related mutations',
})
@@ -51,6 +56,9 @@ export class RootMutations {
    @Field(() => VmMutations, { description: 'VM related mutations' })
    vm: VmMutations = new VmMutations();

    @Field(() => BackupMutations, { description: 'Backup related mutations' })
    backup: BackupMutations = new BackupMutations();

    @Field(() => ApiKeyMutations, { description: 'API Key related mutations' })
    apiKey: ApiKeyMutations = new ApiKeyMutations();

@@ -3,6 +3,7 @@ import { Mutation, Resolver } from '@nestjs/graphql';
import {
    ApiKeyMutations,
    ArrayMutations,
    BackupMutations,
    DockerMutations,
    ParityCheckMutations,
    RCloneMutations,
@@ -27,6 +28,11 @@ export class RootMutationsResolver {
        return new VmMutations();
    }

    @Mutation(() => BackupMutations, { name: 'backup' })
    backup(): BackupMutations {
        return new BackupMutations();
    }

    @Mutation(() => ParityCheckMutations, { name: 'parityCheck' })
    parityCheck(): ParityCheckMutations {
        return new ParityCheckMutations();
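The nested-namespace pattern means backup mutations are addressed through the backup field on the root mutation type. A hedged sketch of the resulting call shape (the inner startBackup field is a placeholder; BackupMutations is empty here and its field resolvers land elsewhere in this branch):

    const BACKUP_MUTATION = /* GraphQL */ `
        mutation {
            backup {
                startBackup(id: "job-1") {  # hypothetical field
                    success
                }
            }
        }
    `;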
api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html (new file, 2626 lines)
File diff suppressed because it is too large
@@ -5,18 +5,27 @@ import { existsSync } from 'node:fs';
|
||||
import { mkdir, rm, writeFile } from 'node:fs/promises';
|
||||
import { dirname, join } from 'node:path';
|
||||
|
||||
import { convert } from 'convert';
|
||||
import { execa } from 'execa';
|
||||
import got, { HTTPError } from 'got';
|
||||
import pRetry from 'p-retry';
|
||||
|
||||
import { sanitizeParams } from '@app/core/log.js';
|
||||
import {
|
||||
getConfigIdFromGroupId,
|
||||
isBackupJobGroup,
|
||||
} from '@app/unraid-api/graph/resolvers/backup/backup.utils.js';
|
||||
import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
|
||||
import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js';
|
||||
import {
|
||||
CreateRCloneRemoteDto,
|
||||
DeleteRCloneRemoteDto,
|
||||
GetRCloneJobStatusDto,
|
||||
GetRCloneRemoteConfigDto,
|
||||
GetRCloneRemoteDetailsDto,
|
||||
RCloneProviderOptionResponse,
|
||||
RCloneJob,
|
||||
RCloneJobListResponse,
|
||||
RCloneJobStats,
|
||||
RCloneProviderResponse,
|
||||
RCloneRemoteConfig,
|
||||
RCloneStartBackupInput,
|
||||
@@ -24,72 +33,109 @@ import {
|
||||
} from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
|
||||
import { validateObject } from '@app/unraid-api/graph/resolvers/validation.utils.js';
|
||||
|
||||
// Constants for the service
|
||||
const CONSTANTS = {
|
||||
LOG_LEVEL: {
|
||||
DEBUG: 'DEBUG',
|
||||
INFO: 'INFO',
|
||||
},
|
||||
RETRY_CONFIG: {
|
||||
retries: 6,
|
||||
minTimeout: 100,
|
||||
maxTimeout: 5000,
|
||||
factor: 2,
|
||||
maxRetryTime: 30000,
|
||||
},
|
||||
TIMEOUTS: {
|
||||
GRACEFUL_SHUTDOWN: 2000,
|
||||
PROCESS_CLEANUP: 1000,
|
||||
},
|
||||
};
|
||||
|
||||
// Internal interface for job status response from RClone API
|
||||
interface RCloneJobStatusResponse {
|
||||
id?: string | number;
|
||||
group?: string;
|
||||
stats?: RCloneJobStats;
|
||||
finished?: boolean;
|
||||
error?: string;
|
||||
[key: string]: any;
|
||||
}
|
||||
|
||||
interface BackupStatusResult {
|
||||
isRunning: boolean;
|
||||
stats: RCloneJobStats | null;
|
||||
jobCount: number;
|
||||
activeJobs: RCloneJobStatusResponse[];
|
||||
}
|
||||
|
||||
interface JobOperationResult {
|
||||
stopped: string[];
|
||||
forgotten?: string[];
|
||||
errors: string[];
|
||||
}
|
||||
|
||||
@Injectable()
|
||||
export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
|
||||
private isInitialized: boolean = false;
|
||||
private initialized: boolean = false;
|
||||
private readonly logger = new Logger(RCloneApiService.name);
|
||||
private rcloneSocketPath: string = '';
|
||||
private rcloneBaseUrl: string = '';
|
||||
private rcloneProcess: ChildProcess | null = null;
|
||||
private readonly rcloneUsername: string =
|
||||
process.env.RCLONE_USERNAME || crypto.randomBytes(12).toString('base64');
|
||||
process.env.RCLONE_USERNAME ||
|
||||
(process.env.NODE_ENV === 'test' ? 'test-user' : crypto.randomBytes(12).toString('hex'));
|
||||
private readonly rclonePassword: string =
|
||||
process.env.RCLONE_PASSWORD || crypto.randomBytes(24).toString('base64');
|
||||
constructor() {}
|
||||
process.env.RCLONE_PASSWORD ||
|
||||
(process.env.NODE_ENV === 'test' ? 'test-pass' : crypto.randomBytes(24).toString('hex'));
|
||||
|
||||
/**
|
||||
* Returns whether the RClone service is initialized and ready to use
|
||||
*/
|
||||
get initialized(): boolean {
|
||||
return this.isInitialized;
|
||||
constructor(private readonly statusService: RCloneStatusService) {}
|
||||
|
||||
get isInitialized(): boolean {
|
||||
return this.initialized;
|
||||
}
|
||||
|
||||
async onModuleInit(): Promise<void> {
|
||||
try {
|
||||
// Check if rclone binary is available first
|
||||
const isBinaryAvailable = await this.checkRcloneBinaryExists();
|
||||
if (!isBinaryAvailable) {
|
||||
this.logger.warn('RClone binary not found on system, skipping initialization');
|
||||
this.isInitialized = false;
|
||||
return;
|
||||
}
|
||||
// Check if rclone binary is available first
|
||||
const isBinaryAvailable = await this.checkRcloneBinaryExists();
|
||||
if (!isBinaryAvailable) {
|
||||
this.logger.warn('RClone binary not found on system, skipping initialization');
|
||||
this.initialized = false;
|
||||
return;
|
||||
}
|
||||
|
||||
const { getters } = await import('@app/store/index.js');
|
||||
// Check if Rclone Socket is running, if not, start it.
|
||||
this.rcloneSocketPath = getters.paths()['rclone-socket'];
|
||||
const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log');
|
||||
this.logger.log(`RClone socket path: ${this.rcloneSocketPath}`);
|
||||
this.logger.log(`RClone log file path: ${logFilePath}`);
|
||||
const { getters } = await import('@app/store/index.js');
|
||||
// Check if Rclone Socket is running, if not, start it.
|
||||
this.rcloneSocketPath = getters.paths()['rclone-socket'];
|
||||
const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log');
|
||||
this.logger.log(`RClone socket path: ${this.rcloneSocketPath}`);
|
||||
this.logger.log(`RClone log file path: ${logFilePath}`);
|
||||
|
||||
// Format the base URL for Unix socket
|
||||
this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`;
|
||||
// Format the base URL for Unix socket
|
||||
this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`;
|
||||
|
||||
// Check if the RClone socket exists, if not, create it.
|
||||
const socketExists = await this.checkRcloneSocketExists(this.rcloneSocketPath);
|
||||
// Check if the RClone socket exists, if not, create it.
|
||||
const socketExists = await this.checkRcloneSocketExists(this.rcloneSocketPath);
|
||||
|
||||
if (socketExists) {
|
||||
const isRunning = await this.checkRcloneSocketRunning();
|
||||
if (isRunning) {
|
||||
this.isInitialized = true;
|
||||
return;
|
||||
} else {
|
||||
this.logger.warn(
|
||||
'RClone socket is not running but socket exists, removing socket before starting...'
|
||||
);
|
||||
await rm(this.rcloneSocketPath, { force: true });
|
||||
}
|
||||
|
||||
this.logger.warn('RClone socket is not running, starting it...');
|
||||
this.isInitialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath);
|
||||
if (socketExists) {
|
||||
const isRunning = await this.checkRcloneSocketRunning();
|
||||
if (isRunning) {
|
||||
this.initialized = true;
|
||||
return;
|
||||
} else {
|
||||
this.logger.warn('RClone socket does not exist, creating it...');
|
||||
this.isInitialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath);
|
||||
return;
|
||||
this.logger.warn(
|
||||
'RClone socket is not running but socket exists, removing socket before starting...'
|
||||
);
|
||||
await rm(this.rcloneSocketPath, { force: true });
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
this.logger.error(`Error initializing RCloneApiService: ${error}`);
|
||||
this.isInitialized = false;
|
||||
|
||||
this.logger.warn('RClone socket is not running, starting it...');
|
||||
this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath);
|
||||
return;
|
||||
} else {
|
||||
this.logger.warn('RClone socket does not exist, creating it...');
|
||||
this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,95 +144,145 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
|
||||
this.logger.log('RCloneApiService module destroyed');
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts the RClone RC daemon on the specified socket path
|
||||
*/
|
||||
private async initializeRCloneService(): Promise<void> {
|
||||
const { getters } = await import('@app/store/index.js');
|
||||
this.rcloneSocketPath = getters.paths()['rclone-socket'];
|
||||
const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log');
|
||||
|
||||
this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`;
|
||||
this.logger.log(
|
||||
`Ensuring RClone is stopped and socket is clean before initialization. Socket path: ${this.rcloneSocketPath}`
|
||||
);
|
||||
|
||||
// Stop any existing rclone instances and remove the socket file.
|
||||
await this.stopRcloneSocket();
|
||||
|
||||
this.logger.warn('Proceeding to start new RClone socket...');
|
||||
this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath);
|
||||
}
|
||||
|
||||
private async startRcloneSocket(socketPath: string, logFilePath: string): Promise<boolean> {
|
||||
try {
|
||||
// Make log file exists
|
||||
if (!existsSync(logFilePath)) {
|
||||
this.logger.debug(`Creating log file: ${logFilePath}`);
|
||||
await mkdir(dirname(logFilePath), { recursive: true });
|
||||
await writeFile(logFilePath, '', 'utf-8');
|
||||
}
|
||||
await this.ensureLogFileExists(logFilePath);
|
||||
|
||||
const rcloneArgs = this.buildRcloneArgs(socketPath, logFilePath);
|
||||
this.logger.log(`Starting RClone RC daemon on socket: ${socketPath}`);
|
||||
// Start the process but don't wait for it to finish
|
||||
this.rcloneProcess = execa(
|
||||
'rclone',
|
||||
[
|
||||
'rcd',
|
||||
'--rc-addr',
|
||||
socketPath,
|
||||
'--log-level',
|
||||
'INFO',
|
||||
'--log-file',
|
||||
logFilePath,
|
||||
...(this.rcloneUsername ? ['--rc-user', this.rcloneUsername] : []),
|
||||
...(this.rclonePassword ? ['--rc-pass', this.rclonePassword] : []),
|
||||
],
|
||||
{ detached: false } // Keep attached to manage lifecycle
|
||||
);
|
||||
|
||||
// Handle potential errors during process spawning (e.g., command not found)
|
||||
this.rcloneProcess.on('error', (error: Error) => {
|
||||
this.logger.error(`RClone process failed to start: ${error.message}`);
|
||||
this.rcloneProcess = null; // Clear the handle on error
|
||||
this.isInitialized = false;
|
||||
});
|
||||
const rcloneProcessExecution = execa('rclone', rcloneArgs, { detached: false });
|
||||
this.rcloneProcess = rcloneProcessExecution;
|
||||
this.setupProcessListeners();
|
||||
|
||||
// Handle unexpected exit
|
||||
this.rcloneProcess.on('exit', (code, signal) => {
|
||||
this.logger.warn(
|
||||
`RClone process exited unexpectedly with code: ${code}, signal: ${signal}`
|
||||
rcloneProcessExecution.catch((error) => {
|
||||
this.logger.debug(
|
||||
`Rclone process execution promise rejected (expected if process failed to start or exited prematurely): ${
|
||||
error.shortMessage || error.message
|
||||
}`
|
||||
);
|
||||
this.rcloneProcess = null;
|
||||
this.isInitialized = false;
|
||||
});
|
||||
|
||||
// Wait for socket to be ready using p-retry with exponential backoff
|
||||
await pRetry(
|
||||
async () => {
|
||||
const isRunning = await this.checkRcloneSocketRunning();
|
||||
if (!isRunning) throw new Error('Rclone socket not ready');
|
||||
},
|
||||
{
|
||||
retries: 6, // 7 attempts total
|
||||
minTimeout: 100,
|
||||
maxTimeout: 5000,
|
||||
factor: 2,
|
||||
maxRetryTime: 30000,
|
||||
}
|
||||
);
|
||||
|
||||
await this.waitForSocketReady();
|
||||
this.logger.log('RClone RC daemon started and socket is ready.');
|
||||
return true;
|
||||
} catch (error: unknown) {
|
||||
this.logger.error(`Error starting RClone RC daemon: ${error}`);
|
||||
this.rcloneProcess?.kill(); // Attempt to kill if started but failed later
|
||||
this.rcloneProcess = null;
|
||||
this.logger.error(`Error during RClone RC daemon startup sequence: ${error}`);
|
||||
this.cleanupFailedProcess();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private async stopRcloneSocket(): Promise<void> {
|
||||
if (this.rcloneProcess && !this.rcloneProcess.killed) {
|
||||
this.logger.log(`Stopping RClone RC daemon process (PID: ${this.rcloneProcess.pid})...`);
|
||||
try {
|
||||
const killed = this.rcloneProcess.kill('SIGTERM'); // Send SIGTERM first
|
||||
if (!killed) {
|
||||
this.logger.warn('Failed to kill RClone process with SIGTERM, trying SIGKILL.');
|
||||
this.rcloneProcess.kill('SIGKILL'); // Force kill if SIGTERM failed
|
||||
}
|
||||
this.logger.log('RClone process stopped.');
|
||||
} catch (error: unknown) {
|
||||
this.logger.error(`Error stopping RClone process: ${error}`);
|
||||
} finally {
|
||||
this.rcloneProcess = null; // Clear the handle
|
||||
}
|
||||
private async ensureLogFileExists(logFilePath: string): Promise<void> {
|
||||
if (!existsSync(logFilePath)) {
|
||||
await mkdir(dirname(logFilePath), { recursive: true });
|
||||
await writeFile(logFilePath, '', 'utf-8');
|
||||
}
|
||||
}
|
||||
|
||||
private buildRcloneArgs(socketPath: string, logFilePath: string): string[] {
|
||||
// Unix sockets don't require HTTP authentication - the socket itself provides security
|
||||
const isUnixSocket = socketPath.startsWith('/');
|
||||
|
||||
if (isUnixSocket) {
|
||||
this.logger.log('Using Unix socket - HTTP authentication not required, using --rc-no-auth');
|
||||
} else {
|
||||
this.logger.log('RClone process not running or already stopped.');
|
||||
this.logger.log(
|
||||
`Building RClone args with username: ${this.rcloneUsername ? '[SET]' : '[NOT SET]'}, password: ${this.rclonePassword ? '[SET]' : '[NOT SET]'}`
|
||||
);
|
||||
}
|
||||
|
||||
// Clean up the socket file if it exists
|
||||
const args = [
|
||||
'rcd',
|
||||
'--rc-addr',
|
||||
socketPath,
|
||||
'--log-level',
|
||||
'INFO',
|
||||
'--log-file',
|
||||
logFilePath,
|
||||
// For Unix sockets, use --rc-no-auth instead of credentials
|
||||
...(isUnixSocket ? ['--rc-no-auth'] : []),
|
||||
// Only add authentication for non-Unix socket connections
|
||||
...(!isUnixSocket && this.rcloneUsername ? ['--rc-user', this.rcloneUsername] : []),
|
||||
...(!isUnixSocket && this.rclonePassword ? ['--rc-pass', this.rclonePassword] : []),
|
||||
];
|
||||
|
||||
this.logger.log(`RClone command args: ${args.join(' ')}`);
|
||||
return args;
|
||||
}
|
||||
|
||||
private setupProcessListeners(): void {
|
||||
if (!this.rcloneProcess) return;
|
||||
|
||||
this.rcloneProcess.on('error', (error: Error) => {
|
||||
this.logger.error(`RClone process failed to start: ${error.message}`);
|
||||
this.cleanupFailedProcess();
|
||||
});
|
||||
|
||||
this.rcloneProcess.on('exit', (code, signal) => {
|
||||
this.logger.warn(`RClone process exited unexpectedly with code: ${code}, signal: ${signal}`);
|
||||
this.cleanupFailedProcess();
|
||||
});
|
||||
}
|
||||
|
||||
private cleanupFailedProcess(): void {
|
||||
this.rcloneProcess = null;
|
||||
this.initialized = false;
|
||||
}
|
||||
|
||||
private async waitForSocketReady(): Promise<void> {
|
||||
await pRetry(async () => {
|
||||
const isRunning = await this.checkRcloneSocketRunning();
|
||||
if (!isRunning) throw new Error('Rclone socket not ready');
|
||||
}, CONSTANTS.RETRY_CONFIG);
|
||||
}
|
||||
|
||||
private async stopRcloneSocket(): Promise<void> {
|
||||
if (this.rcloneProcess && !this.rcloneProcess.killed) {
|
||||
await this.terminateProcess();
|
||||
}
|
||||
|
||||
await this.killExistingRcloneProcesses();
|
||||
await this.removeSocketFile();
|
||||
}
|
||||
|
||||
private async terminateProcess(): Promise<void> {
|
||||
if (!this.rcloneProcess) return;
|
||||
|
||||
this.logger.log(`Stopping RClone RC daemon process (PID: ${this.rcloneProcess.pid})...`);
|
||||
|
||||
try {
|
||||
const killed = this.rcloneProcess.kill('SIGTERM');
|
||||
if (!killed) {
|
||||
this.logger.warn('Failed to kill with SIGTERM, using SIGKILL');
|
||||
this.rcloneProcess.kill('SIGKILL');
|
||||
}
|
||||
this.logger.log('RClone process stopped');
|
||||
} catch (error: unknown) {
|
||||
this.logger.error(`Error stopping RClone process: ${error}`);
|
||||
} finally {
|
||||
this.rcloneProcess = null;
|
||||
}
|
||||
}
|
||||
|
||||
private async removeSocketFile(): Promise<void> {
|
||||
if (this.rcloneSocketPath && existsSync(this.rcloneSocketPath)) {
|
||||
this.logger.log(`Removing RClone socket file: ${this.rcloneSocketPath}`);
|
||||
try {
|
||||
@@ -197,36 +293,19 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
        }
    }

    /**
     * Checks if the RClone socket exists
     */
    private async checkRcloneSocketExists(socketPath: string): Promise<boolean> {
        const socketExists = existsSync(socketPath);
        if (!socketExists) {
            this.logger.warn(`RClone socket does not exist at: ${socketPath}`);
        }
        return socketExists;
    }

    /**
     * Checks if the RClone socket is running
     */
    private async checkRcloneSocketRunning(): Promise<boolean> {
        try {
            // A simple API call to check if the daemon is responsive
            await this.callRcloneApi('core/pid');
            this.logger.debug('RClone socket is running and responsive.');
            return true;
        } catch (error: unknown) {
            // Silently handle socket connection errors during checks
            if (error instanceof Error) {
                if (error.message.includes('ENOENT') || error.message.includes('ECONNREFUSED')) {
                    this.logger.debug('RClone socket not accessible - daemon likely not running');
                } else {
                    this.logger.debug(`RClone socket check failed: ${error.message}`);
                }
            }
            return false;
        }
    }

@@ -267,18 +346,11 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
        return response?.remotes || [];
    }

    /**
     * Get complete remote details
     */
    async getRemoteDetails(input: GetRCloneRemoteDetailsDto): Promise<RCloneRemoteConfig> {
        await validateObject(GetRCloneRemoteDetailsDto, input);
        return this.getRemoteConfig({ name: input.name });
    }

    /**
     * Get configuration of a remote
     */
    async getRemoteConfig(input: GetRCloneRemoteConfigDto): Promise<RCloneRemoteConfig> {
        await validateObject(GetRCloneRemoteConfigDto, input);
        return this.callRcloneApi('config/get', { name: input.name });
@@ -300,77 +372,329 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
        return result;
    }

    /**
     * Update an existing remote configuration
     */
    async updateRemote(input: UpdateRCloneRemoteDto): Promise<unknown> {
        await validateObject(UpdateRCloneRemoteDto, input);
        this.logger.log(`Updating remote: ${input.name}`);

        return this.callRcloneApi('config/update', {
            name: input.name,
            ...input.parameters,
        });
    }

    /**
     * Delete a remote configuration
     */
    async deleteRemote(input: DeleteRCloneRemoteDto): Promise<unknown> {
        await validateObject(DeleteRCloneRemoteDto, input);
        this.logger.log(`Deleting remote: ${input.name}`);
        return this.callRcloneApi('config/delete', { name: input.name });
    }

    /**
     * Start a backup operation using sync/copy
     * This copies a directory from source to destination
     */
    async startBackup(input: RCloneStartBackupInput): Promise<unknown> {
        await validateObject(RCloneStartBackupInput, input);
        this.logger.log(`Starting backup: ${input.srcPath} → ${input.dstPath}`);

        const group = input.configId ? getConfigIdFromGroupId(input.configId) : 'manual';

        const params = {
            srcFs: input.srcPath,
            dstFs: input.dstPath,
            ...(input.async && { _async: input.async }),
            _group: group,
            ...(input.options || {}),
        };

        const result = await this.callRcloneApi('sync/copy', params);
        const jobId = result.jobid || result.jobId || 'unknown';
        this.logger.log(`Backup job created with ID: ${jobId} in group: ${group}`);

        return result;
    }

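    // Illustrative request body (values are made up): for a source under /mnt/user and
    // a configured remote, the call above POSTs to sync/copy something like
    //   { "srcFs": "/mnt/user/appdata", "dstFs": "remote:backups/appdata",
    //     "_async": true, "_group": "cfg123" }
    // rclone then tracks the transfer under _group, which the group-based status
    // lookups below depend on; with _async set, the response carries a jobid.
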

    /**
     * Gets enhanced job status with computed fields
     */
    async getEnhancedJobStatus(jobId: string, configId?: string): Promise<RCloneJob | null> {
        try {
            await validateObject(GetRCloneJobStatusDto, { jobId });

            if (isBackupJobGroup(jobId)) {
                try {
                    const stats = await this.callRcloneApi('core/stats', { group: jobId });
                    const enhancedStats = this.statusService.enhanceStatsWithFormattedFields({
                        ...stats,
                        group: jobId,
                    });

                    const job = this.statusService.transformStatsToJob(jobId, enhancedStats);
                    job.configId = configId || getConfigIdFromGroupId(jobId);

                    // Add computed fields
                    job.isRunning = job.status === BackupJobStatus.RUNNING;
                    job.errorMessage = job.error || undefined;

                    return job;
                } catch (error) {
                    this.logger.warn(`Failed to get group stats for ${jobId}: ${error}`);
                }
            }

            // Fallback to individual job status
            const jobStatus = await this.getIndividualJobStatus(jobId);
            const enhancedStats = jobStatus.stats
                ? this.statusService.enhanceStatsWithFormattedFields(jobStatus.stats)
                : {};

            const job = this.statusService.transformStatsToJob(jobId, enhancedStats);

            // Add computed fields
            job.isRunning = job.status === BackupJobStatus.RUNNING;
            job.errorMessage = job.error || undefined;

            // Add configId if provided
            if (configId) {
                job.configId = configId;
            }

            return job;
        } catch (error) {
            this.logger.error(`Failed to fetch enhanced job status for ${jobId}: %o`, error);
            return null;
        }
    }

    async getJobStatus(input: GetRCloneJobStatusDto): Promise<RCloneJob> {
        const enhancedJob = await this.getEnhancedJobStatus(input.jobId);
        if (enhancedJob) {
            return enhancedJob;
        }

        // Final fallback
        const jobStatus = await this.getIndividualJobStatus(input.jobId);
        return this.statusService.parseJobWithStats(input.jobId, jobStatus);
    }

    async getIndividualJobStatus(jobId: string): Promise<RCloneJobStatusResponse> {
        this.logger.debug(`Fetching status for job ${jobId}`);
        const result = await this.callRcloneApi('job/status', { jobid: jobId });

        if (result.error) {
            this.logger.warn(`Job ${jobId} has error: ${result.error}`);
        }

        return result;
    }

    /**
     * List all running jobs
     */
    async listRunningJobs(): Promise<RCloneJobListResponse> {
        this.logger.debug('Fetching job list from RClone API');
        return this.callRcloneApi('job/list');
    }

    async getAllJobsWithStats(): Promise<RCloneJob[]> {
        try {
            // Get both the job list and group list
            const [runningJobs, groupList] = await Promise.all([
                this.listRunningJobs(),
                this.callRcloneApi('core/group-list'),
            ]);

            this.logger.debug(`Running jobs: ${JSON.stringify(runningJobs)}`);
            this.logger.debug(`Group list: ${JSON.stringify(groupList)}`);

            // Safety check: if too many groups, something is wrong
            if (groupList.groups && groupList.groups.length > 100) {
                this.logger.error(
                    `DANGER: Found ${groupList.groups.length} groups, aborting to prevent job explosion`
                );
                return [];
            }

            // Safety check: if too many individual jobs, something is wrong
            if (runningJobs.jobids && runningJobs.jobids.length > 1000) {
                this.logger.error(
                    `DANGER: Found ${runningJobs.jobids.length} individual jobs, aborting to prevent performance issues`
                );
                return [];
            }

            if (!runningJobs.jobids?.length) {
                this.logger.debug('No running jobs found');
                return [];
            }

            const backupGroups = (groupList.groups || []).filter((group: string) =>
                isBackupJobGroup(group)
            );

            if (backupGroups.length === 0) {
                this.logger.debug('No backup groups found');
                return [];
            }

            // Get group stats for all backup groups to get proper stats and group info
            const groupStatsMap = new Map<string, any>();
            await Promise.all(
                backupGroups.map(async (group: string) => {
                    try {
                        const stats = await this.callRcloneApi('core/stats', { group });
                        groupStatsMap.set(group, stats);
                    } catch (error) {
                        this.logger.warn(`Failed to get stats for group ${group}: ${error}`);
                    }
                })
            );

            const jobs: RCloneJob[] = [];

            // For each backup group, create a job entry with proper stats
            backupGroups.forEach((group) => {
                const groupStats = groupStatsMap.get(group);
                if (!groupStats) return;

                this.logger.debug(`Processing group ${group}: stats=${JSON.stringify(groupStats)}`);

                const extractedConfigId = getConfigIdFromGroupId(group);

                const enhancedStats = this.statusService.enhanceStatsWithFormattedFields({
                    ...groupStats,
                    group,
                });

                const job = this.statusService.transformStatsToJob(group, enhancedStats);
                job.configId = extractedConfigId;

                // Only include jobs that are truly active (not completed)
                const isActivelyTransferring = groupStats.transferring?.length > 0;
                const isActivelyChecking = groupStats.checking?.length > 0;
                const hasActiveSpeed = groupStats.speed > 0;
                const isNotFinished = !groupStats.finished && groupStats.fatalError !== true;

                if ((isActivelyTransferring || isActivelyChecking || hasActiveSpeed) && isNotFinished) {
                    jobs.push(job);
                }
            });

            this.logger.debug(
                `Found ${jobs.length} active backup jobs from ${backupGroups.length} groups`
            );
            return jobs;
        } catch (error) {
            this.logger.error('Failed to get jobs with stats:', error);
            return [];
        }
    }

    async stopAllJobs(): Promise<JobOperationResult> {
        const runningJobs = await this.listRunningJobs();

        if (!runningJobs.jobids?.length) {
            this.logger.log('No running jobs to stop');
            return { stopped: [], errors: [] };
        }

        this.logger.log(`Stopping ${runningJobs.jobids.length} running jobs`);
        return this.executeJobOperation(runningJobs.jobids, 'stop');
    }

    async stopJob(jobId: string): Promise<JobOperationResult> {
        this.logger.log(`Stopping job: ${jobId}`);

        if (isBackupJobGroup(jobId)) {
            // This is a group, use the stopgroup endpoint
            return this.executeGroupOperation([jobId], 'stopgroup');
        } else {
            // This is an individual job ID, use the regular stop endpoint
            return this.executeJobOperation([jobId], 'stop');
        }
    }

    private async executeGroupOperation(
        groupNames: string[],
        operation: 'stopgroup'
    ): Promise<JobOperationResult> {
        const stopped: string[] = [];
        const errors: string[] = [];

        const promises = groupNames.map(async (groupName) => {
            try {
                await this.callRcloneApi(`job/${operation}`, { group: groupName });
                stopped.push(groupName);
                this.logger.log(`${operation}ped group: ${groupName}`);
            } catch (error) {
                const errorMsg = `Failed to ${operation} group ${groupName}: ${error}`;
                errors.push(errorMsg);
                this.logger.error(errorMsg);
            }
        });

        await Promise.allSettled(promises);
        return { stopped, errors };
    }

    private async executeJobOperation(
        jobIds: (string | number)[],
        operation: 'stop'
    ): Promise<JobOperationResult> {
        const stopped: string[] = [];
        const errors: string[] = [];

        const promises = jobIds.map(async (jobId) => {
            try {
                await this.callRcloneApi(`job/${operation}`, { jobid: jobId });
                stopped.push(String(jobId));
                this.logger.log(`${operation}ped job: ${jobId}`);
            } catch (error) {
                const errorMsg = `Failed to ${operation} job ${jobId}: ${error}`;
                errors.push(errorMsg);
                this.logger.error(errorMsg);
            }
        });

        await Promise.allSettled(promises);
        return { stopped, errors };
    }

    async getBackupStatus(): Promise<BackupStatusResult> {
        const runningJobs = await this.listRunningJobs();

        if (!runningJobs.jobids?.length) {
            return this.statusService.parseBackupStatus(runningJobs, []);
        }

        const jobStatuses = await Promise.allSettled(
            runningJobs.jobids.map((jobId) => this.getIndividualJobStatus(String(jobId)))
        );

        return this.statusService.parseBackupStatus(runningJobs, jobStatuses);
    }

    /**
     * Generic method to call the RClone RC API
     */
    private async callRcloneApi(endpoint: string, params: Record<string, unknown> = {}): Promise<any> {
        const url = `${this.rcloneBaseUrl}/${endpoint}`;

        // Unix sockets don't require HTTP authentication - the socket itself provides security
        const isUnixSocket = this.rcloneSocketPath && this.rcloneSocketPath.startsWith('/');

        const requestOptions: any = {
            json: params,
            responseType: 'json',
            enableUnixSockets: true,
        };

        // Only add authentication headers for non-Unix socket connections
        if (!isUnixSocket && this.rcloneUsername && this.rclonePassword) {
            const authString = `${this.rcloneUsername}:${this.rclonePassword}`;
            const authHeader = `Basic ${Buffer.from(authString).toString('base64')}`;
            requestOptions.headers = {
                Authorization: authHeader,
            };
            this.logger.debug(
                `Calling RClone API: ${endpoint} with auth header: ${authHeader.substring(0, 20)}...`
            );
        } else {
            this.logger.debug(`Calling RClone API: ${endpoint} via Unix socket (no auth required)`);
        }

        try {
            const response = await got.post(url, requestOptions);
            return response.body;
        } catch (error: unknown) {
            this.handleApiError(error, endpoint, params);
        }
    }

@@ -378,54 +702,108 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
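    // Sketch of the transport assumption (not part of the service): with got's
    // enableUnixSockets option, rcloneBaseUrl would use got's unix-socket URL form,
    // e.g. `http://unix:${this.rcloneSocketPath}:` (hypothetical construction), so a
    // call to `${this.rcloneBaseUrl}/core/pid` resolves to
    //   http://unix:/path/to/rclone.sock:/core/pid
    // and never touches TCP, which is why the auth branch above can be skipped.
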
    private handleApiError(error: unknown, endpoint: string, params: Record<string, unknown>): never {
        const sanitizedParams = sanitizeParams(params);

        if (error instanceof HTTPError) {
            const statusCode = error.response.statusCode;
            const rcloneError = this.extractRcloneError(error.response.body, params);
            const message = `Rclone API Error (${endpoint}, HTTP ${statusCode}): ${rcloneError}`;

            this.logger.error(`${message} | Params: ${JSON.stringify(sanitizedParams)}`, error.stack);
            throw new Error(message);
        }

        const message =
            error instanceof Error
                ? `Error calling RClone API (${endpoint}): ${error.message}`
                : `Unknown error calling RClone API (${endpoint}): ${String(error)}`;

        this.logger.error(
            `${message} | Params: ${JSON.stringify(sanitizedParams)}`,
            error instanceof Error ? error.stack : undefined
        );
        throw error instanceof Error ? error : new Error(message);
    }

    private extractRcloneError(responseBody: unknown, fallbackParams: Record<string, unknown>): string {
        try {
            const errorBody = typeof responseBody === 'string' ? JSON.parse(responseBody) : responseBody;

            if (errorBody && typeof errorBody === 'object' && 'error' in errorBody) {
                const typedError = errorBody as { error: unknown; input?: unknown };
                let message = `Rclone Error: ${String(typedError.error)}`;

                if (typedError.input) {
                    message += ` | Input: ${JSON.stringify(typedError.input)}`;
                } else {
                    message += ` | Params: ${JSON.stringify(fallbackParams)}`;
                }

                return message;
            }

            return responseBody
                ? `Non-standard error response: ${typeof responseBody === 'string' ? responseBody : JSON.stringify(responseBody)}`
                : 'Empty error response received';
        } catch {
            return `Failed to process error response: ${typeof responseBody === 'string' ? responseBody : JSON.stringify(responseBody)}`;
        }
    }

    private async killExistingRcloneProcesses(): Promise<void> {
        try {
            this.logger.log('Checking for existing rclone processes...');
            const { stdout } = await execa('pgrep', ['-f', 'rclone.*rcd'], { reject: false });

            if (!stdout.trim()) {
                this.logger.log('No existing rclone processes found');
                return;
            }

            const pids = stdout
                .trim()
                .split('\n')
                .filter((pid) => pid.trim());
            this.logger.log(`Found ${pids.length} existing rclone process(es): ${pids.join(', ')}`);

            await this.terminateProcesses(pids);
            await this.cleanupStaleSocket();
        } catch (error) {
            this.logger.warn(`Error during rclone process cleanup: ${error}`);
        }
    }

    private async terminateProcesses(pids: string[]): Promise<void> {
        for (const pid of pids) {
            try {
                this.logger.log(`Terminating rclone process PID: ${pid}`);

                await execa('kill', ['-TERM', pid], { reject: false });
                await new Promise((resolve) =>
                    setTimeout(resolve, CONSTANTS.TIMEOUTS.GRACEFUL_SHUTDOWN)
                );

                const { exitCode } = await execa('kill', ['-0', pid], { reject: false });

                if (exitCode === 0) {
                    this.logger.warn(`Process ${pid} still running, using SIGKILL`);
                    await execa('kill', ['-KILL', pid], { reject: false });
                    await new Promise((resolve) =>
                        setTimeout(resolve, CONSTANTS.TIMEOUTS.PROCESS_CLEANUP)
                    );
                }

                this.logger.log(`Successfully terminated process ${pid}`);
            } catch (error) {
                this.logger.warn(`Failed to kill process ${pid}: ${error}`);
            }
        }
    }

    private async cleanupStaleSocket(): Promise<void> {
        if (this.rcloneSocketPath && existsSync(this.rcloneSocketPath)) {
            await rm(this.rcloneSocketPath, { force: true });
            this.logger.log('Removed stale socket file');
        }
    }
}

@@ -0,0 +1,505 @@
import { Test, TestingModule } from '@nestjs/testing';

import { beforeEach, describe, expect, it, vi } from 'vitest';

import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js';
import { RCloneJobStats } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
import { FormatService } from '@app/unraid-api/utils/format.service.js';

// Mock NestJS Logger to suppress logs during tests
vi.mock('@nestjs/common', async (importOriginal) => {
    const original = await importOriginal<typeof import('@nestjs/common')>();
    return {
        ...original,
        Logger: vi.fn(() => ({
            log: vi.fn(),
            warn: vi.fn(),
            error: vi.fn(),
            debug: vi.fn(),
        })),
    };
});

describe('RCloneStatusService', () => {
    let service: RCloneStatusService;
    let mockFormatService: FormatService;

    beforeEach(() => {
        vi.clearAllMocks();

        mockFormatService = {
            formatBytes: vi.fn().mockImplementation((bytes: number) => `${bytes} B`),
            formatSpeed: vi.fn().mockImplementation((bytesPerSecond: number) => `${bytesPerSecond} B/s`),
            formatDuration: vi.fn().mockImplementation((seconds: number) => `${seconds}s`),
        } as any;

        service = new RCloneStatusService(mockFormatService);
    });

    describe('enhanceStatsWithFormattedFields', () => {
        it('should add formatted fields for all numeric stats', () => {
            const stats: RCloneJobStats = {
                bytes: 1024,
                speed: 512,
                elapsedTime: 60,
                eta: 120,
            };

            const result = service.enhanceStatsWithFormattedFields(stats);

            expect(result).toEqual({
                bytes: 1024,
                speed: 512,
                elapsedTime: 60,
                eta: 120,
                formattedBytes: '1024 B',
                formattedSpeed: '512 B/s',
                formattedElapsedTime: '60s',
                formattedEta: '120s',
                calculatedPercentage: 0,
                isActivelyRunning: true,
                isCompleted: false,
            });
            expect(mockFormatService.formatBytes).toHaveBeenCalledWith(1024);
            expect(mockFormatService.formatSpeed).toHaveBeenCalledWith(512);
            expect(mockFormatService.formatDuration).toHaveBeenCalledWith(60);
            expect(mockFormatService.formatDuration).toHaveBeenCalledWith(120);
        });

        it('should not add formatted fields for undefined values', () => {
            const stats: RCloneJobStats = {
                bytes: undefined,
                speed: undefined,
                elapsedTime: undefined,
                eta: undefined,
            };

            const result = service.enhanceStatsWithFormattedFields(stats);

            expect(result).toEqual({
                bytes: undefined,
                speed: undefined,
                elapsedTime: undefined,
                eta: undefined,
                calculatedPercentage: 0,
                formattedElapsedTime: '0s',
                formattedEta: 'Unknown',
                formattedSpeed: '0 B/s',
                isActivelyRunning: false,
                isCompleted: false,
            });
            expect(mockFormatService.formatBytes).not.toHaveBeenCalled();
            expect(mockFormatService.formatDuration).not.toHaveBeenCalled();
        });

        it('should not add formatted fields for null values', () => {
            const stats: RCloneJobStats = {
                bytes: null as any,
                speed: null as any,
                elapsedTime: null as any,
                eta: null as any,
            };

            const result = service.enhanceStatsWithFormattedFields(stats);

            expect(result).toEqual({
                bytes: null,
                speed: null,
                elapsedTime: null,
                eta: null,
                calculatedPercentage: 0,
                formattedElapsedTime: '0s',
                formattedEta: 'Unknown',
                formattedSpeed: '0 B/s',
                isActivelyRunning: false,
                isCompleted: false,
            });
            expect(mockFormatService.formatBytes).not.toHaveBeenCalled();
            expect(mockFormatService.formatDuration).not.toHaveBeenCalled();
        });

        it('should not add formatted speed for zero speed', () => {
            const stats: RCloneJobStats = {
                speed: 0,
            };

            const result = service.enhanceStatsWithFormattedFields(stats);

            expect(result).toEqual({
                speed: 0,
                calculatedPercentage: 0,
                formattedElapsedTime: '0s',
                formattedEta: 'Unknown',
                formattedSpeed: '0 B/s',
                isActivelyRunning: false,
                isCompleted: false,
            });
            expect(mockFormatService.formatSpeed).not.toHaveBeenCalled();
        });

        it('should not add formatted eta for zero eta', () => {
            const stats: RCloneJobStats = {
                eta: 0,
            };

            const result = service.enhanceStatsWithFormattedFields(stats);

            expect(result).toEqual({
                eta: 0,
                calculatedPercentage: 0,
                formattedElapsedTime: '0s',
                formattedEta: 'Unknown',
                formattedSpeed: '0 B/s',
                isActivelyRunning: false,
                isCompleted: false,
            });
            expect(mockFormatService.formatDuration).not.toHaveBeenCalled();
        });
    });

    describe('transformStatsToJob', () => {
        it('should create RCloneJob with completed status when transfers match total', () => {
            const stats: RCloneJobStats = {
                group: 'unraid-backup',
                fatalError: false,
                transfers: 5,
                totalTransfers: 5,
                errors: 0,
                percentage: 100,
            };

            const result = service.transformStatsToJob('123', stats);

            expect(result).toEqual({
                id: '123',
                group: 'unraid-backup',
                stats,
                finished: true,
                success: true,
                error: undefined,
                progressPercentage: 100,
                status: BackupJobStatus.COMPLETED,
                hasRecentJob: true,
            });
        });

        it('should create RCloneJob with running status when transfers incomplete', () => {
            const stats: RCloneJobStats = {
                group: 'unraid-backup',
                fatalError: false,
                transfers: 3,
                totalTransfers: 5,
                errors: 0,
                percentage: 60,
            };

            const result = service.transformStatsToJob('123', stats);

            expect(result).toEqual({
                id: '123',
                group: 'unraid-backup',
                stats,
                finished: false,
                success: true,
                error: undefined,
                progressPercentage: 60,
                status: BackupJobStatus.RUNNING,
                hasRecentJob: true,
            });
        });

        it('should create RCloneJob with error status when lastError exists', () => {
            const stats: RCloneJobStats = {
                group: 'unraid-backup',
                fatalError: false,
                transfers: 0,
                totalTransfers: 5,
                errors: 1,
                percentage: 0,
                lastError: 'Connection timeout',
            };

            const result = service.transformStatsToJob('123', stats);

            expect(result).toEqual({
                id: '123',
                group: 'unraid-backup',
                stats,
                finished: false,
                success: false,
                error: 'Connection timeout',
                progressPercentage: 0,
                status: BackupJobStatus.FAILED,
                hasRecentJob: true,
            });
        });

        it('should create RCloneJob with cancelled status when lastError is context canceled', () => {
            const stats: RCloneJobStats = {
                group: 'unraid-backup',
                fatalError: false,
                transfers: 0,
                totalTransfers: 5,
                errors: 1,
                percentage: 0,
                lastError: 'context canceled',
            };

            const result = service.transformStatsToJob('123', stats);

            expect(result).toEqual({
                id: '123',
                group: 'unraid-backup',
                stats,
                finished: false,
                success: false,
                error: 'context canceled',
                progressPercentage: 0,
                status: BackupJobStatus.CANCELLED,
                hasRecentJob: true,
            });
        });

        it('should handle numeric job ID', () => {
            const stats: RCloneJobStats = {
                fatalError: false,
                transfers: 0,
                totalTransfers: 0,
            };

            const result = service.transformStatsToJob(456, stats);

            expect(result.id).toBe('456');
        });

        it('should handle missing group', () => {
            const stats: RCloneJobStats = {
                fatalError: false,
                transfers: 0,
                totalTransfers: 0,
            };

            const result = service.transformStatsToJob('123', stats);

            expect(result.group).toBeUndefined();
        });
    });

    describe('calculateCombinedStats', () => {
        it('should combine stats from multiple jobs', () => {
            const mockActiveJobs = [
                {
                    stats: {
                        bytes: 1024,
                        checks: 2,
                        transfers: 3,
                        totalBytes: 2048,
                        totalChecks: 4,
                        totalTransfers: 6,
                        speed: 100,
                        eta: 120,
                    },
                },
                {
                    stats: {
                        bytes: 512,
                        checks: 1,
                        transfers: 2,
                        totalBytes: 1024,
                        totalChecks: 2,
                        totalTransfers: 4,
                        speed: 200,
                        eta: 60,
                    },
                },
            ];

            const result = service.calculateCombinedStats(mockActiveJobs);

            expect(result).toEqual({
                bytes: 1536,
                checks: 3,
                transfers: 5,
                totalBytes: 3072,
                totalChecks: 6,
                totalTransfers: 10,
                speed: 200, // Max speed
                eta: 120, // Max eta
            });
        });

        it('should return null for empty jobs array', () => {
            const result = service.calculateCombinedStats([]);
            expect(result).toBeNull();
        });

        it('should return null when no valid stats', () => {
            const mockActiveJobs = [{ stats: null as any }, { stats: undefined as any }];
            const result = service.calculateCombinedStats(mockActiveJobs);
            expect(result).toBeNull();
        });
    });

    describe('parseActiveJobs', () => {
        it('should return active jobs that are not finished', () => {
            const mockJobStatuses = [
                { status: 'fulfilled', value: { id: '1', finished: false } },
                { status: 'fulfilled', value: { id: '2', finished: true } },
                { status: 'rejected', reason: 'Error' },
            ] as PromiseSettledResult<any>[];

            const result = service.parseActiveJobs(mockJobStatuses);

            expect(result).toEqual([{ id: '1', finished: false }]);
        });

        it('should return empty array when all jobs are finished', () => {
            const mockJobStatuses = [
                { status: 'fulfilled', value: { id: '1', finished: true } },
            ] as PromiseSettledResult<any>[];

            const result = service.parseActiveJobs(mockJobStatuses);

            expect(result).toEqual([]);
        });
    });

    describe('parseBackupStatus', () => {
        it('should return running status when active jobs exist', () => {
            const mockRunningJobs = { jobids: ['123', '456'] };
            const mockJobStatuses = [
                { status: 'fulfilled', value: { id: '123', finished: false, stats: { bytes: 1024 } } },
                { status: 'fulfilled', value: { id: '456', finished: false, stats: { bytes: 512 } } },
            ] as PromiseSettledResult<any>[];

            const result = service.parseBackupStatus(mockRunningJobs, mockJobStatuses);

            expect(result).toEqual({
                isRunning: true,
                stats: expect.objectContaining({ bytes: 1536 }),
                jobCount: 2,
                activeJobs: expect.arrayContaining([
                    expect.objectContaining({ id: '123', finished: false }),
                    expect.objectContaining({ id: '456', finished: false }),
                ]),
            });
        });

        it('should return not running when no job IDs', () => {
            const mockRunningJobs = { jobids: [] };
            const mockJobStatuses = [] as PromiseSettledResult<any>[];

            const result = service.parseBackupStatus(mockRunningJobs, mockJobStatuses);

            expect(result).toEqual({
                isRunning: false,
                stats: null,
                jobCount: 0,
                activeJobs: [],
            });
        });
    });

    describe('parseJobWithStats', () => {
        it('should parse job with enhanced stats', () => {
            const mockJobStatus = {
                stats: { bytes: 1024, speed: 512 },
            };

            const result = service.parseJobWithStats('123', mockJobStatus);

            expect(result).toEqual(
                expect.objectContaining({
                    id: '123',
                    stats: expect.objectContaining({
                        bytes: 1024,
                        speed: 512,
                        formattedBytes: '1024 B',
                        formattedSpeed: '512 B/s',
                    }),
                })
            );
        });

        it('should handle missing stats', () => {
            const mockJobStatus = {};

            const result = service.parseJobWithStats('123', mockJobStatus);

            expect(result.id).toBe('123');
            expect(result.stats).toEqual({});
        });
    });

    describe('parseAllJobsWithStats', () => {
        it('should return jobs when job IDs exist', () => {
            const mockRunningJobs = { jobids: ['123', '456'] };
            const mockJobs = [
                { id: '123', group: 'unraid-backup' },
                { id: '456', group: 'unraid-backup' },
            ] as any[];

            const result = service.parseAllJobsWithStats(mockRunningJobs, mockJobs);

            expect(result).toEqual(mockJobs);
        });

        it('should return empty array when no job IDs', () => {
            const mockRunningJobs = { jobids: [] };
            const mockJobs = [] as any[];

            const result = service.parseAllJobsWithStats(mockRunningJobs, mockJobs);

            expect(result).toEqual([]);
        });
    });

    describe('parseJobsWithStats', () => {
        it('should parse fulfilled job statuses', () => {
            const mockJobStatuses = [
                { status: 'fulfilled', value: { id: '123', stats: { bytes: 1024 } } },
                { status: 'fulfilled', value: { id: '456', stats: { bytes: 512 } } },
                { status: 'rejected', reason: 'Error' },
            ] as PromiseSettledResult<any>[];

            const result = service.parseJobsWithStats(mockJobStatuses);

            expect(result).toHaveLength(2);
            expect(result[0]).toEqual(
                expect.objectContaining({
                    id: '123',
                    stats: expect.objectContaining({ bytes: 1024, formattedBytes: '1024 B' }),
                })
            );
            expect(result[1]).toEqual(
                expect.objectContaining({
                    id: '456',
                    stats: expect.objectContaining({ bytes: 512, formattedBytes: '512 B' }),
                })
            );
        });

        it('should handle rejected statuses gracefully', () => {
            const mockJobStatuses = [
                { status: 'rejected', reason: 'Error' },
            ] as PromiseSettledResult<any>[];

            const result = service.parseJobsWithStats(mockJobStatuses);

            expect(result).toEqual([]);
        });
    });

    describe('getBackupStatus', () => {
        it('should return default backup status', () => {
            const result = service.getBackupStatus();

            expect(result).toEqual({
                isRunning: false,
                stats: null,
                jobCount: 0,
            });
        });
    });
});

@@ -0,0 +1,268 @@
import { Injectable, Logger } from '@nestjs/common';

import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import {
    RCloneJob,
    RCloneJobListResponse,
    RCloneJobStats,
    RCloneJobWithStats,
} from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
import { FormatService } from '@app/unraid-api/utils/format.service.js';

// Internal interface for job status response from RClone API
interface RCloneJobStatusResponse {
    id?: string | number;
    group?: string;
    stats?: RCloneJobStats;
    finished?: boolean;
    error?: string;
    [key: string]: any;
}

interface BackupStatusResult {
    isRunning: boolean;
    stats: RCloneJobStats | null;
    jobCount: number;
    activeJobs: RCloneJobStatusResponse[];
}

@Injectable()
export class RCloneStatusService {
    private readonly logger = new Logger(RCloneStatusService.name);

    constructor(private readonly formatService: FormatService) {}

    enhanceStatsWithFormattedFields(stats: RCloneJobStats): RCloneJobStats {
        const enhancedStats = { ...stats };

        const isFinished =
            stats.fatalError === false &&
            stats.transfers === (stats.totalTransfers || 0) &&
            (stats.totalTransfers || 0) > 0;

        // Format bytes
        if (stats.bytes !== undefined && stats.bytes !== null) {
            enhancedStats.formattedBytes = this.formatService.formatBytes(stats.bytes);
        }

        // Handle speed formatting and reset for finished jobs
        if (isFinished && stats.speed !== undefined && stats.speed !== null) {
            enhancedStats.speed = 0;
        }

        if (stats.speed !== undefined && stats.speed !== null && stats.speed > 0) {
            enhancedStats.formattedSpeed = this.formatService.formatSpeed(stats.speed);
        } else {
            enhancedStats.formattedSpeed = '0 B/s';
        }

        // Format elapsed time
        if (stats.elapsedTime !== undefined && stats.elapsedTime !== null) {
            enhancedStats.formattedElapsedTime = this.formatService.formatDuration(stats.elapsedTime);
        } else {
            enhancedStats.formattedElapsedTime = '0s';
        }

        // Format ETA
        if (stats.eta !== undefined && stats.eta !== null && stats.eta > 0) {
            enhancedStats.formattedEta = this.formatService.formatDuration(stats.eta);
        } else {
            enhancedStats.formattedEta = 'Unknown';
        }

        // Calculate percentage fallback (what frontend currently does)
        let calculatedPercentage = stats.percentage;
        if (calculatedPercentage === null || calculatedPercentage === undefined) {
            if (stats.bytes && stats.totalBytes && stats.totalBytes > 0) {
                calculatedPercentage = Math.round((stats.bytes / stats.totalBytes) * 100);
            }
        }

        // For completed jobs, ensure percentage is 100
        if (isFinished && calculatedPercentage !== null && calculatedPercentage !== undefined) {
            calculatedPercentage = 100;
        }

        enhancedStats.calculatedPercentage = Math.round(calculatedPercentage || 0);

        // Determine if actively running (what frontend currently calculates)
        const isActivelyTransferring =
            stats.transferring && Array.isArray(stats.transferring) && stats.transferring.length > 0;
        const isActivelyChecking =
            stats.checking && Array.isArray(stats.checking) && stats.checking.length > 0;
        const hasActiveSpeed = (stats.speed || 0) > 0;
        const isNotFinished = !isFinished && stats.fatalError !== true;

        enhancedStats.isActivelyRunning =
            (isActivelyTransferring || isActivelyChecking || hasActiveSpeed) && isNotFinished;
        enhancedStats.isCompleted = isFinished;

        return enhancedStats;
    }
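    // Worked example (illustrative numbers): stats of { bytes: 512, totalBytes: 2048,
    // percentage: undefined } yield calculatedPercentage = round(512 / 2048 * 100) = 25,
    // formattedBytes '512 B' via FormatService, and isActivelyRunning stays false
    // unless transferring/checking entries exist or speed > 0.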

    transformStatsToJob(jobId: string | number, stats: RCloneJobStats): RCloneJob {
        this.logger.debug(`Stats for job ${jobId}: %o`, stats);
        const group = stats.group || undefined;

        this.logger.debug(`Processing job ${jobId}: group="${group}"`);

        const isFinished =
            stats.fatalError === false &&
            stats.transfers === (stats.totalTransfers || 0) &&
            (stats.totalTransfers || 0) > 0;

        const hasError = Boolean(stats.lastError);
        const isCancelled = stats.lastError === 'context canceled';

        // Determine status
        let status: BackupJobStatus;

        if (hasError) {
            if (isCancelled) {
                status = BackupJobStatus.CANCELLED;
            } else {
                status = BackupJobStatus.FAILED;
            }
        } else if (isFinished || stats.calculatedPercentage === 100) {
            status = BackupJobStatus.COMPLETED;
        } else {
            status = BackupJobStatus.RUNNING;
        }

        return {
            id: String(jobId),
            group: group,
            stats,
            finished: isFinished,
            success: stats.fatalError === false && (stats.errors || 0) === 0,
            error: stats.lastError || undefined,
            progressPercentage: stats.calculatedPercentage || stats.percentage,
            status,
            hasRecentJob: true, // If we have a job object, there's a recent job
        };
    }
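    // Worked example (illustrative): { fatalError: false, transfers: 5, totalTransfers: 5,
    // errors: 0 } counts as finished with no lastError, so status = COMPLETED and
    // success = true; the same stats with lastError 'context canceled' map to CANCELLED.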

    calculateCombinedStats(activeJobs: RCloneJobStatusResponse[]): RCloneJobStats | null {
        if (activeJobs.length === 0) return null;

        const validStats = activeJobs
            .map((job) => job.stats)
            .filter((stats): stats is RCloneJobStats => Boolean(stats));

        if (validStats.length === 0) return null;

        return validStats.reduce(
            (combined, stats) => ({
                bytes: (combined.bytes || 0) + (stats.bytes || 0),
                checks: (combined.checks || 0) + (stats.checks || 0),
                transfers: (combined.transfers || 0) + (stats.transfers || 0),
                totalBytes: (combined.totalBytes || 0) + (stats.totalBytes || 0),
                totalChecks: (combined.totalChecks || 0) + (stats.totalChecks || 0),
                totalTransfers: (combined.totalTransfers || 0) + (stats.totalTransfers || 0),
                speed: Math.max(combined.speed || 0, stats.speed || 0),
                eta: Math.max(combined.eta || 0, stats.eta || 0),
            }),
            {} as RCloneJobStats
        );
    }
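    // Worked example (illustrative): combining { bytes: 1024, speed: 100, eta: 120 } and
    // { bytes: 512, speed: 200, eta: 60 } sums bytes to 1536 but keeps the max speed (200)
    // and max eta (120), on the assumption that parallel transfers finish when the
    // slowest one does.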

    parseActiveJobs(
        jobStatuses: PromiseSettledResult<RCloneJobStatusResponse>[]
    ): RCloneJobStatusResponse[] {
        const activeJobs: RCloneJobStatusResponse[] = [];

        this.logger.debug(`Job statuses: ${JSON.stringify(jobStatuses)}`);

        jobStatuses.forEach((result, index) => {
            if (result.status === 'fulfilled' && !result.value.finished) {
                activeJobs.push(result.value);
            } else if (result.status === 'rejected') {
                this.logger.warn(`Failed to get status for job ${index}: ${result.reason}`);
            }
        });

        return activeJobs;
    }

    parseBackupStatus(
        runningJobs: RCloneJobListResponse,
        jobStatuses: PromiseSettledResult<RCloneJobStatusResponse>[]
    ): BackupStatusResult {
        if (!runningJobs.jobids?.length) {
            return {
                isRunning: false,
                stats: null,
                jobCount: 0,
                activeJobs: [],
            };
        }

        const activeJobs = this.parseActiveJobs(jobStatuses);
        const combinedStats = this.calculateCombinedStats(activeJobs);

        return {
            isRunning: activeJobs.length > 0,
            stats: combinedStats,
            jobCount: activeJobs.length,
            activeJobs,
        };
    }

    parseJobWithStats(jobId: string, jobStatus: RCloneJobStatusResponse): RCloneJob {
        const stats = jobStatus.stats ? this.enhanceStatsWithFormattedFields(jobStatus.stats) : {};
        return this.transformStatsToJob(jobId, stats);
    }

    parseAllJobsWithStats(runningJobs: RCloneJobListResponse, jobs: RCloneJob[]): RCloneJob[] {
        if (!runningJobs.jobids?.length) {
            this.logger.log('No active jobs found in RClone');
            return [];
        }

        this.logger.log(
            `Found ${runningJobs.jobids.length} active jobs in RClone: [${runningJobs.jobids.join(', ')}]`
        );

        return jobs;
    }

    parseJobsWithStats(jobStatuses: PromiseSettledResult<RCloneJobStatusResponse>[]): RCloneJob[] {
        const allJobs: RCloneJob[] = [];

        jobStatuses.forEach((result, index) => {
            if (result.status === 'fulfilled') {
                const jobStatus = result.value;
                const stats = jobStatus.stats
                    ? this.enhanceStatsWithFormattedFields(jobStatus.stats)
                    : {};
                const job = this.transformStatsToJob(jobStatus.id || index, stats);
                allJobs.push(job);
            } else {
                this.logger.error(`Failed to get status for job ${index}: ${result.reason}`);
            }
        });

        return allJobs;
    }

    getBackupStatus(): {
        isRunning: boolean;
        stats: RCloneJobStats | null;
        jobCount: number;
    } {
        try {
            return {
                isRunning: false,
                stats: null,
                jobCount: 0,
            };
        } catch (error) {
            this.logger.debug(`Error getting backup status: ${error}`);
            return {
                isRunning: false,
                stats: null,
                jobCount: 0,
            };
        }
    }
}

@@ -1,9 +1,11 @@
import { Field, ID, InputType, ObjectType } from '@nestjs/graphql';

import { type Layout } from '@jsonforms/core';
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
import { IsBoolean, IsObject, IsOptional, IsString } from 'class-validator';
import { GraphQLJSON } from 'graphql-scalars';

import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
import { DataSlice } from '@app/unraid-api/types/json-forms.js';

@ObjectType()
@@ -147,6 +149,19 @@ export class RCloneStartBackupInput {
    @IsString()
    dstPath!: string;

    @Field(() => Boolean, { nullable: true, defaultValue: false })
    @IsOptional()
    @IsBoolean()
    async?: boolean;

    @Field(() => String, {
        nullable: true,
        description: 'Configuration ID for job grouping and identification',
    })
    @IsOptional()
    @IsString()
    configId?: string;

    @Field(() => GraphQLJSON, { nullable: true })
    @IsOptional()
    @IsObject()
@@ -206,3 +221,189 @@ export class GetRCloneJobStatusDto {
    @IsString()
    jobId!: string;
}

@ObjectType()
export class RCloneJobStats {
    @Field(() => Number, { description: 'Bytes transferred', nullable: true })
    bytes?: number;

    @Field(() => Number, { description: 'Transfer speed in bytes/sec', nullable: true })
    speed?: number;

    @Field(() => Number, { description: 'Estimated time to completion in seconds', nullable: true })
    eta?: number;

    @Field(() => Number, { description: 'Elapsed time in seconds', nullable: true })
    elapsedTime?: number;

    @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true })
    percentage?: number;

    @Field(() => Number, { description: 'Number of checks completed', nullable: true })
    checks?: number;

    @Field(() => Number, { description: 'Number of deletes completed', nullable: true })
    deletes?: number;

    @Field(() => Number, { description: 'Number of errors encountered', nullable: true })
    errors?: number;

    @Field(() => Boolean, { description: 'Whether a fatal error occurred', nullable: true })
    fatalError?: boolean;

    @Field(() => String, { description: 'Last error message', nullable: true })
    lastError?: string;

    @Field(() => Number, { description: 'Number of renames completed', nullable: true })
    renames?: number;

    @Field(() => Boolean, { description: 'Whether there is a retry error', nullable: true })
    retryError?: boolean;

    @Field(() => Number, { description: 'Number of server-side copies', nullable: true })
    serverSideCopies?: number;

    @Field(() => Number, { description: 'Bytes in server-side copies', nullable: true })
    serverSideCopyBytes?: number;

    @Field(() => Number, { description: 'Number of server-side moves', nullable: true })
    serverSideMoves?: number;

    @Field(() => Number, { description: 'Bytes in server-side moves', nullable: true })
    serverSideMoveBytes?: number;

    @Field(() => Number, { description: 'Total bytes to transfer', nullable: true })
    totalBytes?: number;

    @Field(() => Number, { description: 'Total checks to perform', nullable: true })
    totalChecks?: number;

    @Field(() => Number, { description: 'Total transfers to perform', nullable: true })
    totalTransfers?: number;

    @Field(() => Number, { description: 'Time spent transferring in seconds', nullable: true })
    transferTime?: number;

    @Field(() => Number, { description: 'Number of transfers completed', nullable: true })
    transfers?: number;

    @Field(() => GraphQLJSON, { description: 'Currently transferring files', nullable: true })
    transferring?: any[];

    @Field(() => GraphQLJSON, { description: 'Currently checking files', nullable: true })
    checking?: any[];

    // Formatted fields
    @Field(() => String, { description: 'Human-readable bytes transferred', nullable: true })
    formattedBytes?: string;

    @Field(() => String, { description: 'Human-readable transfer speed', nullable: true })
    formattedSpeed?: string;

    @Field(() => String, { description: 'Human-readable elapsed time', nullable: true })
    formattedElapsedTime?: string;

    @Field(() => String, { description: 'Human-readable ETA', nullable: true })
    formattedEta?: string;

    // Computed fields that frontend currently calculates
    @Field(() => Number, {
        description: 'Calculated percentage (fallback when percentage is null)',
        nullable: true,
    })
    calculatedPercentage?: number;

    @Field(() => Boolean, { description: 'Whether the job is actively running', nullable: true })
    isActivelyRunning?: boolean;

    @Field(() => Boolean, { description: 'Whether the job is completed', nullable: true })
    isCompleted?: boolean;

    // Allow additional fields
    [key: string]: any;
}

@ObjectType()
export class RCloneJob {
    @Field(() => PrefixedID, { description: 'Job ID' })
    id!: string;

    @Field(() => String, { description: 'RClone group for the job', nullable: true })
    group?: string;

    @Field(() => RCloneJobStats, { description: 'Job status and statistics', nullable: true })
    stats?: RCloneJobStats;

    @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true })
    progressPercentage?: number;

    @Field(() => PrefixedID, { description: 'Configuration ID that triggered this job', nullable: true })
    configId?: string;

    @Field(() => BackupJobStatus, { description: 'Current status of the job', nullable: true })
    status?: BackupJobStatus;

    @Field(() => Boolean, { description: 'Whether the job is finished', nullable: true })
    finished?: boolean;

    @Field(() => Boolean, { description: 'Whether the job was successful', nullable: true })
    success?: boolean;

    @Field(() => String, { description: 'Error message if job failed', nullable: true })
    error?: string;

    // Computed fields that frontend currently calculates
    @Field(() => Boolean, { description: 'Whether the job is actively running', nullable: true })
    isRunning?: boolean;

    @Field(() => String, { description: 'Error message for display', nullable: true })
    errorMessage?: string;

    @Field(() => Boolean, { description: 'Whether there is a recent job', nullable: true })
    hasRecentJob?: boolean;
}

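// Rough sketch of the SDL @nestjs/graphql should emit for RCloneJob (assumed, not
// copied from a generated schema; Number fields default to Float):
//   type RCloneJob {
//     id: PrefixedID!
//     group: String
//     stats: RCloneJobStats
//     progressPercentage: Float
//     configId: PrefixedID
//     status: BackupJobStatus
//     finished: Boolean
//     success: Boolean
//     error: String
//     isRunning: Boolean
//     errorMessage: String
//     hasRecentJob: Boolean
//   }
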
@ObjectType()
export class RCloneJobStatusDto {
    @Field(() => Number, { description: 'Job ID' })
    id!: number;

    @Field(() => String, { description: 'RClone group for the job' })
    group!: string;

    @Field(() => Boolean, { description: 'Whether the job is finished' })
    finished!: boolean;

    @Field(() => Boolean, { description: 'Whether the job was successful' })
    success!: boolean;

    @Field(() => String, { description: 'Error message if any' })
    error!: string;

    @Field(() => Number, { description: 'Job duration in seconds' })
    duration!: number;

    @Field(() => String, { description: 'Job start time in ISO format' })
    startTime!: string;

    @Field(() => String, { description: 'Job end time in ISO format' })
    endTime!: string;

    @Field(() => GraphQLJSON, { description: 'Job output data', nullable: true })
    output?: Record<string, any>;
}

// API Response Types (for internal use)
export interface RCloneJobListResponse {
    jobids: (string | number)[];
}

export interface RCloneJobWithStats {
    jobId: string | number;
    stats: RCloneJobStats;
}

export interface RCloneJobsWithStatsResponse {
    jobids: (string | number)[];
    stats: RCloneJobStats[];
}

@@ -1,20 +1,24 @@
import { forwardRef, Module } from '@nestjs/common';

import { BackupSourceModule } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.module.js';
import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';
import { RCloneFormService } from '@app/unraid-api/graph/resolvers/rclone/rclone-form.service.js';
import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js';
import { RCloneMutationsResolver } from '@app/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.js';
import { RCloneBackupSettingsResolver } from '@app/unraid-api/graph/resolvers/rclone/rclone.resolver.js';
import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';
import { UtilsModule } from '@app/unraid-api/utils/utils.module.js';

@Module({
    imports: [UtilsModule, forwardRef(() => BackupSourceModule)],
    providers: [
        RCloneService,
        RCloneApiService,
        RCloneStatusService,
        RCloneFormService,
        RCloneBackupSettingsResolver,
        RCloneMutationsResolver,
    ],
    exports: [RCloneService, RCloneApiService, RCloneStatusService],
})
export class RCloneModule {}

@@ -1,7 +1,7 @@
import { Logger } from '@nestjs/common';
import { Args, ResolveField, Resolver } from '@nestjs/graphql';

import { Resource } from '@unraid/shared/graphql.model';
import {
    AuthActionVerb,
    AuthPossession,
@@ -14,6 +14,7 @@ import {
    CreateRCloneRemoteInput,
    DeleteRCloneRemoteInput,
    RCloneRemote,
    RCloneRemoteConfig,
} from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';

/**
@@ -38,7 +39,7 @@ export class RCloneMutationsResolver {
                name: input.name,
                type: input.type,
                parameters: {},
                config: config as RCloneRemoteConfig,
            };
        } catch (error) {
            this.logger.error(`Error creating remote: ${error}`);

@@ -5,13 +5,13 @@ import { type Layout } from '@jsonforms/core';

 import type { SettingSlice } from '@app/unraid-api/types/json-forms.js';
 import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';
 import { RCloneFormService } from '@app/unraid-api/graph/resolvers/rclone/rclone-form.service.js';
-import { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
+import { RCloneJob, RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';

 /**
  * Types for rclone backup configuration UI
  */
 export interface RcloneBackupConfigValues {
-    configStep: number;
+    configStep: { current: number; total: number };
     showAdvanced: boolean;
     name?: string;
     type?: string;
@@ -48,7 +48,7 @@ export class RCloneService {
      */
     async onModuleInit(): Promise<void> {
         try {
-            if (!this.rcloneApiService.initialized) {
+            if (!this.rcloneApiService.isInitialized) {
                 this.logger.warn(
                     'RClone API service is not initialized, skipping provider info loading'
                 );
@@ -83,7 +83,7 @@ export class RCloneService {
      */
     async getCurrentSettings(): Promise<RcloneBackupConfigValues> {
         return {
-            configStep: 0,
+            configStep: { current: 0, total: 0 },
             showAdvanced: false,
         };
     }
@@ -125,4 +125,11 @@ export class RCloneService {
         return [];
     }
-}
+
+    /**
+     * Gets enhanced job status with computed fields
+     */
+    async getEnhancedJobStatus(jobId: string, configId?: string): Promise<RCloneJob | null> {
+        return this.rcloneApiService.getEnhancedJobStatus(jobId, configId);
+    }
+}
@@ -2,15 +2,14 @@ import { Module } from '@nestjs/common';

import { AuthModule } from '@app/unraid-api/auth/auth.module.js';
import { ApiKeyModule } from '@app/unraid-api/graph/resolvers/api-key/api-key.module.js';
import { ApiKeyResolver } from '@app/unraid-api/graph/resolvers/api-key/api-key.resolver.js';
import { ArrayModule } from '@app/unraid-api/graph/resolvers/array/array.module.js';
import { BackupModule } from '@app/unraid-api/graph/resolvers/backup/backup.module.js';
import { ConfigResolver } from '@app/unraid-api/graph/resolvers/config/config.resolver.js';
import { CustomizationModule } from '@app/unraid-api/graph/resolvers/customization/customization.module.js';
import { DisksModule } from '@app/unraid-api/graph/resolvers/disks/disks.module.js';
import { DisplayResolver } from '@app/unraid-api/graph/resolvers/display/display.resolver.js';
import { DisplayService } from '@app/unraid-api/graph/resolvers/display/display.service.js';
import { DockerModule } from '@app/unraid-api/graph/resolvers/docker/docker.module.js';
import { FlashBackupModule } from '@app/unraid-api/graph/resolvers/flash-backup/flash-backup.module.js';
import { FlashResolver } from '@app/unraid-api/graph/resolvers/flash/flash.resolver.js';
import { DevicesResolver } from '@app/unraid-api/graph/resolvers/info/devices.resolver.js';
import { DevicesService } from '@app/unraid-api/graph/resolvers/info/devices.service.js';
@@ -34,16 +33,18 @@ import { VmsService } from '@app/unraid-api/graph/resolvers/vms/vms.service.js';
import { ServicesResolver } from '@app/unraid-api/graph/services/services.resolver.js';
import { SharesResolver } from '@app/unraid-api/graph/shares/shares.resolver.js';
import { MeResolver } from '@app/unraid-api/graph/user/user.resolver.js';
import { UtilsModule } from '@app/unraid-api/utils/utils.module.js';

@Module({
    imports: [
        UtilsModule,
        ArrayModule,
        ApiKeyModule,
        AuthModule,
        BackupModule,
        CustomizationModule,
        DockerModule,
        DisksModule,
        FlashBackupModule,
        RCloneModule,
        SettingsModule,
    ],
10  api/src/unraid-api/graph/utils/utils.module.ts  Normal file
@@ -0,0 +1,10 @@
import { Global, Module } from '@nestjs/common';

import { FormatService } from '@app/unraid-api/utils/format.service.js';

@Global()
@Module({
    providers: [FormatService],
    exports: [FormatService],
})
export class UtilsModule {}
@@ -18,7 +18,8 @@ export async function bootstrapNestServer(): Promise<NestFastifyApplication> {

     const app = await NestFactory.create<NestFastifyApplication>(AppModule, new FastifyAdapter(), {
         bufferLogs: false,
-        ...(LOG_LEVEL !== 'TRACE' ? { logger: false } : {}),
+        ...(LOG_LEVEL !== 'DEBUG' ? { logger: false } : {}),
     });

     // Enable validation globally
@@ -65,7 +65,16 @@ export class PluginService {
      * @returns A tuple of the plugin name and version.
      */
     static async listPlugins(): Promise<[string, string][]> {
-        const { plugins = [] } = await loadApiConfig();
+        let plugins: string[] = [];
+        try {
+            const config = await loadApiConfig();
+            plugins = config.plugins || [];
+        } catch (error) {
+            PluginService.logger.error(
+                'Failed to load API config for plugin discovery, using empty list:',
+                error
+            );
+        }
         const pluginNames = new Set(
             plugins.map((plugin) => {
                 const { name } = parsePackageArg(plugin);
@@ -1 +1 @@
-1751630630443
+1752326314433
@@ -1 +1 @@
-1751630630198
+1752326314052
@@ -1 +1 @@
-1751630630343
+1752326314199
@@ -1 +1 @@
-1751630630571
+1752326314557
@@ -1 +1 @@
-1751630630810
+1752326314785
@@ -65,6 +65,13 @@ if (is_localhost() && !is_good_session()) {
         return this.prependDoctypeWithPhp(source, newPhpCode);
     }

+    private addModalsWebComponent(source: string): string {
+        if (source.includes('<unraid-modals>')) {
+            return source;
+        }
+        return source.replace('<body>', '<body>\n<unraid-modals></unraid-modals>');
+    }
+
     private hideHeaderLogo(source: string): string {
         return source.replace(
             '<a href="https://unraid.net" target="_blank"><?readfile("$docroot/webGui/images/UN-logotype-gradient.svg")?></a>',
@@ -72,17 +79,14 @@ if (is_localhost() && !is_good_session()) {
         );
     }

-    private addModalsWebComponent(source: string): string {
-        return source.replace('<body>', '<body>\n<unraid-modals></unraid-modals>');
-    }
-
     private applyToSource(fileContent: string): string {
         const transformers = [
             this.removeNotificationBell.bind(this),
             this.replaceToasts.bind(this),
             this.addToaster.bind(this),
             this.patchGuiBootAuth.bind(this),
-            this.hideHeaderLogo.bind(this),
             this.addModalsWebComponent.bind(this),
+            this.hideHeaderLogo.bind(this),
         ];

         return transformers.reduce((content, transformer) => transformer(content), fileContent);
59  api/src/unraid-api/utils/format.service.test.ts  Normal file
@@ -0,0 +1,59 @@
import { describe, expect, it } from 'vitest';

import { FormatService } from '@app/unraid-api/utils/format.service.js';

describe('FormatService', () => {
    const service = new FormatService();

    describe('formatBytes', () => {
        it('should format zero bytes', () => {
            expect(service.formatBytes(0)).toBe('0 B');
        });

        it('should format bytes to best unit', () => {
            expect(service.formatBytes(1024)).toBe('1.02 KB');
            expect(service.formatBytes(1048576)).toBe('1.05 MB');
            expect(service.formatBytes(1073741824)).toBe('1.07 GB');
        });

        it('should format with decimals when needed', () => {
            expect(service.formatBytes(1536)).toBe('1.54 KB');
            expect(service.formatBytes(9636529)).toBe('9.64 MB');
        });
    });

    describe('formatSpeed', () => {
        it('should format zero speed', () => {
            expect(service.formatSpeed(0)).toBe('0 B/s');
        });

        it('should format speed with /s suffix', () => {
            expect(service.formatSpeed(1024)).toBe('1.02 KB/s');
            expect(service.formatSpeed(1048576)).toBe('1.05 MB/s');
            expect(service.formatSpeed(1073741824)).toBe('1.07 GB/s');
        });

        it('should format with decimals when needed', () => {
            expect(service.formatSpeed(1536)).toBe('1.54 KB/s');
            expect(service.formatSpeed(9636529.183648435)).toBe('9.64 MB/s');
        });
    });

    describe('formatDuration', () => {
        it('should format small durations in seconds', () => {
            expect(service.formatDuration(30)).toBe('30s');
            expect(service.formatDuration(45.5)).toBe('45.5s');
        });

        it('should format longer durations to best unit', () => {
            expect(service.formatDuration(60)).toBe('60 s');
            expect(service.formatDuration(3600)).toBe('60 min');
            expect(service.formatDuration(86400)).toBe('24 h');
        });

        it('should format with decimals when needed', () => {
            expect(service.formatDuration(90)).toBe('1.5 min');
            expect(service.formatDuration(11.615060290966666 * 60)).toBe('11.62 min');
        });
    });
});
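One detail worth calling out in these expectations: sub-minute durations take a fast path that returns a compact `${n}s` with no space, while 60 seconds and above go through `convert(...).to('best')`, whose unit strings include a space. That is why '30s' and '60 s' legitimately coexist above:

```ts
import { FormatService } from '@app/unraid-api/utils/format.service.js';

const service = new FormatService();

service.formatDuration(45.5); // '45.5s'  — < 60s fast path, compact suffix
service.formatDuration(60);   // '60 s'   — convert's 'best' unit, space included
service.formatDuration(90);   // '1.5 min'
```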
33  api/src/unraid-api/utils/format.service.ts  Normal file
@@ -0,0 +1,33 @@
import { Injectable } from '@nestjs/common';

import { convert } from 'convert';

@Injectable()
export class FormatService {
    formatBytes(bytes: number): string {
        if (bytes === 0) return '0 B';

        const result = convert(bytes, 'bytes').to('best');
        const value =
            typeof result.quantity === 'number' ? Number(result.quantity.toFixed(2)) : result.quantity;
        return `${value} ${result.unit}`;
    }

    formatSpeed(bytesPerSecond: number): string {
        if (bytesPerSecond === 0) return '0 B/s';

        const result = convert(bytesPerSecond, 'bytes').to('best');
        const value =
            typeof result.quantity === 'number' ? Number(result.quantity.toFixed(2)) : result.quantity;
        return `${value} ${result.unit}/s`;
    }

    formatDuration(seconds: number): string {
        if (seconds < 60) return `${Math.round(seconds * 100) / 100}s`;

        const result = convert(seconds, 'seconds').to('best');
        const value =
            typeof result.quantity === 'number' ? Number(result.quantity.toFixed(2)) : result.quantity;
        return `${value} ${result.unit}`;
    }
}
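A consequence of delegating to convert's 'best' unit selection: the byte units are decimal (SI), so 1024 bytes renders as '1.02 KB' rather than '1 KiB', exactly as the tests above assert. A quick usage sketch:

```ts
import { FormatService } from '@app/unraid-api/utils/format.service.js';

const fmt = new FormatService();

fmt.formatBytes(0);       // '0 B'      — zero is special-cased before convert runs
fmt.formatBytes(1024);    // '1.02 KB'  — decimal kilobytes, not binary KiB
fmt.formatSpeed(1048576); // '1.05 MB/s'
```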
10  api/src/unraid-api/utils/utils.module.ts  Normal file
@@ -0,0 +1,10 @@
import { Global, Module } from '@nestjs/common';

import { FormatService } from '@app/unraid-api/utils/format.service.js';

@Global()
@Module({
    providers: [FormatService],
    exports: [FormatService],
})
export class UtilsModule {}
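Because this module is decorated with `@Global()`, FormatService resolves from the root injector once UtilsModule is registered anywhere in the application; feature modules do not strictly need it in their own `imports` (the explicit import in RCloneModule above is harmless but redundant under that reading). A sketch of a consumer (service name illustrative, not part of this diff):

```ts
import { Injectable } from '@nestjs/common';

import { FormatService } from '@app/unraid-api/utils/format.service.js';

@Injectable()
export class TransferReportService {
    // No `imports: [UtilsModule]` needed in this service's module:
    // @Global() providers resolve application-wide.
    constructor(private readonly format: FormatService) {}

    describe(bytes: number, seconds: number): string {
        return `${this.format.formatBytes(bytes)} in ${this.format.formatDuration(seconds)}`;
    }
}
```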
11  package.json
@@ -1,13 +1,13 @@
 {
   "name": "unraid-monorepo",
   "private": true,
-  "version": "4.9.1",
+  "version": "4.9.5",
   "scripts": {
     "build": "pnpm -r build",
     "build:watch": " pnpm -r --parallel build:watch",
     "dev": "pnpm -r dev",
     "unraid:deploy": "pnpm -r unraid:deploy",
-    "test": "pnpm -r test",
+    "test": "vitest",
     "lint": "pnpm -r lint",
     "lint:fix": "pnpm -r lint:fix",
     "type-check": "pnpm -r type-check",
@@ -43,7 +43,8 @@
     "@manypkg/cli": "0.24.0",
     "chalk": "5.4.1",
     "diff": "8.0.2",
-    "ignore": "7.0.5"
+    "ignore": "7.0.5",
+    "vitest": "3.2.4"
   },
   "devDependencies": {
     "lint-staged": "16.1.2",
@@ -54,8 +55,8 @@
   },
   "lint-staged": {
     "*.{js,jsx,ts,tsx,vue}": [
-      "pnpm lint:fix"
+      "npx pnpm lint:fix"
     ]
   },
-  "packageManager": "pnpm@10.12.4"
+  "packageManager": "pnpm@10.13.1"
 }
@@ -25,10 +25,10 @@
   "description": "Unraid Connect plugin for Unraid API",
   "devDependencies": {
     "@apollo/client": "3.13.8",
-    "@faker-js/faker": "9.8.0",
+    "@faker-js/faker": "9.9.0",
     "@graphql-codegen/cli": "5.0.7",
     "@graphql-typed-document-node/core": "3.2.0",
-    "@ianvs/prettier-plugin-sort-imports": "4.4.2",
+    "@ianvs/prettier-plugin-sort-imports": "4.5.1",
     "@jsonforms/core": "3.6.0",
     "@nestjs/apollo": "13.1.0",
     "@nestjs/common": "11.1.3",
@@ -41,13 +41,13 @@
     "@types/ini": "4.1.1",
     "@types/ip": "1.1.3",
     "@types/lodash-es": "4.17.12",
-    "@types/node": "22.15.32",
+    "@types/node": "22.16.3",
     "@types/ws": "8.18.1",
     "camelcase-keys": "9.1.3",
     "class-transformer": "0.5.1",
     "class-validator": "0.14.2",
     "execa": "9.6.0",
-    "fast-check": "4.1.1",
+    "fast-check": "4.2.0",
     "got": "14.4.7",
     "graphql": "16.11.0",
     "graphql-scalars": "1.24.2",
@@ -57,13 +57,13 @@
     "jose": "6.0.11",
     "lodash-es": "4.17.21",
     "nest-authz": "2.17.0",
-    "prettier": "3.5.3",
+    "prettier": "3.6.2",
     "rimraf": "6.0.1",
     "rxjs": "7.8.2",
     "type-fest": "4.41.0",
     "typescript": "5.8.3",
     "vitest": "3.2.4",
-    "ws": "8.18.2",
+    "ws": "8.18.3",
     "zen-observable-ts": "1.1.0"
   },
   "dependencies": {
@@ -0,0 +1,269 @@
import { EventEmitter2 } from '@nestjs/event-emitter';

import { PubSub } from 'graphql-subscriptions';
import { beforeEach, describe, expect, it, vi } from 'vitest';

import { MinigraphStatus } from '../config/connect.config.js';
import { EVENTS, GRAPHQL_PUBSUB_CHANNEL } from '../helper/nest-tokens.js';
import { MothershipConnectionService } from '../mothership-proxy/connection.service.js';
import { MothershipController } from '../mothership-proxy/mothership.controller.js';
import { MothershipHandler } from '../mothership-proxy/mothership.events.js';

describe('MothershipHandler - Behavioral Tests', () => {
    let handler: MothershipHandler;
    let connectionService: MothershipConnectionService;
    let mothershipController: MothershipController;
    let pubSub: PubSub;
    let eventEmitter: EventEmitter2;

    // Track actual state changes and effects
    let connectionAttempts: Array<{ timestamp: number; reason: string }> = [];
    let publishedMessages: Array<{ channel: string; data: any }> = [];
    let controllerStops: Array<{ timestamp: number; reason?: string }> = [];

    beforeEach(() => {
        // Reset tracking arrays
        connectionAttempts = [];
        publishedMessages = [];
        controllerStops = [];

        // Create real event emitter for integration testing
        eventEmitter = new EventEmitter2();

        // Mock connection service with realistic behavior
        connectionService = {
            getIdentityState: vi.fn(),
            getConnectionState: vi.fn(),
        } as any;

        // Mock controller that tracks behavior instead of just method calls
        mothershipController = {
            initOrRestart: vi.fn().mockImplementation(() => {
                connectionAttempts.push({
                    timestamp: Date.now(),
                    reason: 'initOrRestart called',
                });
                return Promise.resolve();
            }),
            stop: vi.fn().mockImplementation(() => {
                controllerStops.push({
                    timestamp: Date.now(),
                });
                return Promise.resolve();
            }),
        } as any;

        // Mock PubSub that tracks published messages
        pubSub = {
            publish: vi.fn().mockImplementation((channel: string, data: any) => {
                publishedMessages.push({ channel, data });
                return Promise.resolve();
            }),
        } as any;

        handler = new MothershipHandler(connectionService, mothershipController, pubSub);
    });

    describe('Connection Recovery Behavior', () => {
        it('should attempt reconnection when ping fails', async () => {
            // Given: Connection is in ping failure state
            vi.mocked(connectionService.getConnectionState).mockReturnValue({
                status: MinigraphStatus.PING_FAILURE,
                error: 'Ping timeout after 3 minutes',
            });

            // When: Connection status change event occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: System should attempt to recover the connection
            expect(connectionAttempts).toHaveLength(1);
            expect(connectionAttempts[0].reason).toBe('initOrRestart called');
        });

        it('should NOT interfere with exponential backoff during error retry state', async () => {
            // Given: Connection is in error retry state (GraphQL client managing backoff)
            vi.mocked(connectionService.getConnectionState).mockReturnValue({
                status: MinigraphStatus.ERROR_RETRYING,
                error: 'Network error',
                timeout: 20000,
                timeoutStart: Date.now(),
            });

            // When: Connection status change event occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: System should NOT interfere with ongoing retry logic
            expect(connectionAttempts).toHaveLength(0);
        });

        it('should remain stable during normal connection states', async () => {
            const stableStates = [MinigraphStatus.CONNECTED, MinigraphStatus.CONNECTING];

            for (const status of stableStates) {
                // Reset for each test
                connectionAttempts.length = 0;

                // Given: Connection is in a stable state
                vi.mocked(connectionService.getConnectionState).mockReturnValue({
                    status,
                    error: null,
                });

                // When: Connection status change event occurs
                await handler.onMothershipConnectionStatusChanged();

                // Then: System should not trigger unnecessary reconnection attempts
                expect(connectionAttempts).toHaveLength(0);
            }
        });
    });

    describe('Identity-Based Connection Behavior', () => {
        it('should establish connection when valid API key becomes available', async () => {
            // Given: Valid API key is present
            vi.mocked(connectionService.getIdentityState).mockReturnValue({
                state: {
                    apiKey: 'valid-unraid-key-12345',
                    unraidVersion: '6.12.0',
                    flashGuid: 'test-flash-guid',
                    apiVersion: '1.0.0',
                },
                isLoaded: true,
            });

            // When: Identity changes
            await handler.onIdentityChanged();

            // Then: System should establish mothership connection
            expect(connectionAttempts).toHaveLength(1);
        });

        it('should not attempt connection without valid credentials', async () => {
            const invalidCredentials = [{ apiKey: undefined }, { apiKey: '' }];

            for (const credentials of invalidCredentials) {
                // Reset for each test
                connectionAttempts.length = 0;

                // Given: Invalid or missing API key
                vi.mocked(connectionService.getIdentityState).mockReturnValue({
                    state: credentials,
                    isLoaded: false,
                });

                // When: Identity changes
                await handler.onIdentityChanged();

                // Then: System should not attempt connection
                expect(connectionAttempts).toHaveLength(0);
            }
        });
    });

    describe('Logout Behavior', () => {
        it('should properly clean up connections and notify subscribers on logout', async () => {
            // When: User logs out
            await handler.logout({ reason: 'User initiated logout' });

            // Then: System should clean up connections
            expect(controllerStops).toHaveLength(1);

            // And: Subscribers should be notified of empty state
            expect(publishedMessages).toHaveLength(2);

            const serversMessage = publishedMessages.find(
                (m) => m.channel === GRAPHQL_PUBSUB_CHANNEL.SERVERS
            );
            const ownerMessage = publishedMessages.find(
                (m) => m.channel === GRAPHQL_PUBSUB_CHANNEL.OWNER
            );

            expect(serversMessage?.data).toEqual({ servers: [] });
            expect(ownerMessage?.data).toEqual({
                owner: { username: 'root', url: '', avatar: '' },
            });
        });

        it('should handle logout gracefully even without explicit reason', async () => {
            // When: System logout occurs without reason
            await handler.logout({});

            // Then: Cleanup should still occur properly
            expect(controllerStops).toHaveLength(1);
            expect(publishedMessages).toHaveLength(2);
        });
    });

    describe('DDoS Prevention Behavior', () => {
        it('should demonstrate exponential backoff is respected during network errors', async () => {
            // Given: Multiple rapid network errors occur
            const errorStates = [
                { status: MinigraphStatus.ERROR_RETRYING, error: 'Network error 1' },
                { status: MinigraphStatus.ERROR_RETRYING, error: 'Network error 2' },
                { status: MinigraphStatus.ERROR_RETRYING, error: 'Network error 3' },
            ];

            // When: Rapid error retry states occur
            for (const state of errorStates) {
                vi.mocked(connectionService.getConnectionState).mockReturnValue(state);
                await handler.onMothershipConnectionStatusChanged();
            }

            // Then: No linear retry attempts should be made (respecting exponential backoff)
            expect(connectionAttempts).toHaveLength(0);
        });

        it('should differentiate between network errors and ping failures', async () => {
            // Given: Network error followed by ping failure
            vi.mocked(connectionService.getConnectionState).mockReturnValue({
                status: MinigraphStatus.ERROR_RETRYING,
                error: 'Network error',
            });

            // When: Network error occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: No immediate reconnection attempt
            expect(connectionAttempts).toHaveLength(0);

            // Given: Ping failure occurs (different issue)
            vi.mocked(connectionService.getConnectionState).mockReturnValue({
                status: MinigraphStatus.PING_FAILURE,
                error: 'Ping timeout',
            });

            // When: Ping failure occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: Immediate reconnection attempt should occur
            expect(connectionAttempts).toHaveLength(1);
        });
    });

    describe('Edge Cases and Error Handling', () => {
        it('should handle missing connection state gracefully', async () => {
            // Given: Connection service returns undefined
            vi.mocked(connectionService.getConnectionState).mockReturnValue(undefined);

            // When: Connection status change occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: No errors should occur, no reconnection attempts
            expect(connectionAttempts).toHaveLength(0);
        });

        it('should handle malformed connection state', async () => {
            // Given: Malformed connection state
            vi.mocked(connectionService.getConnectionState).mockReturnValue({
                status: 'UNKNOWN_STATUS' as any,
                error: 'Malformed state',
            });

            // When: Connection status change occurs
            await handler.onMothershipConnectionStatusChanged();

            // Then: Should not trigger reconnection for unknown states
            expect(connectionAttempts).toHaveLength(0);
        });
    });
});
@@ -0,0 +1,158 @@
import { ConfigService } from '@nestjs/config';
import { access, constants, mkdir, readFile, rm } from 'fs/promises';
import { join } from 'path';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { ConfigType } from '../config/connect.config.js';
import { ConnectStatusWriterService } from './connect-status-writer.service.js';

describe('ConnectStatusWriterService Config Behavior', () => {
    let service: ConnectStatusWriterService;
    let configService: ConfigService<ConfigType, true>;
    const testDir = '/tmp/connect-status-config-test';
    const testFilePath = join(testDir, 'connectStatus.json');

    // Simulate config changes
    let configStore: any = {};

    beforeEach(async () => {
        vi.clearAllMocks();

        // Reset config store
        configStore = {};

        // Create test directory
        await mkdir(testDir, { recursive: true });

        // Create a ConfigService mock that behaves like the real one
        configService = {
            get: vi.fn().mockImplementation((key: string) => {
                console.log(`ConfigService.get('${key}') called, returning:`, configStore[key]);
                return configStore[key];
            }),
            set: vi.fn().mockImplementation((key: string, value: any) => {
                console.log(`ConfigService.set('${key}', ${JSON.stringify(value)}) called`);
                configStore[key] = value;
            }),
        } as unknown as ConfigService<ConfigType, true>;

        service = new ConnectStatusWriterService(configService);

        // Override the status file path to use our test location
        Object.defineProperty(service, 'statusFilePath', {
            get: () => testFilePath,
        });
    });

    afterEach(async () => {
        await service.onModuleDestroy();
        await rm(testDir, { recursive: true, force: true });
    });

    it('should write status when config is updated directly', async () => {
        // Initialize service - should write PRE_INIT
        await service.onApplicationBootstrap();
        await new Promise(resolve => setTimeout(resolve, 50));

        let content = await readFile(testFilePath, 'utf-8');
        let data = JSON.parse(content);
        console.log('Initial status:', data);
        expect(data.connectionStatus).toBe('PRE_INIT');

        // Update config directly (simulating what ConnectionService does)
        console.log('\n=== Updating config to CONNECTED ===');
        configService.set('connect.mothership', {
            status: 'CONNECTED',
            error: null,
            lastPing: Date.now(),
        });

        // Call the writeStatus method directly (since @OnEvent handles the event)
        await service['writeStatus']();

        content = await readFile(testFilePath, 'utf-8');
        data = JSON.parse(content);
        console.log('Status after config update:', data);
        expect(data.connectionStatus).toBe('CONNECTED');
    });

    it('should test the actual flow with multiple status updates', async () => {
        await service.onApplicationBootstrap();
        await new Promise(resolve => setTimeout(resolve, 50));

        const statusUpdates = [
            { status: 'CONNECTING', error: null, lastPing: null },
            { status: 'CONNECTED', error: null, lastPing: Date.now() },
            { status: 'DISCONNECTED', error: 'Lost connection', lastPing: Date.now() - 10000 },
            { status: 'RECONNECTING', error: null, lastPing: Date.now() - 10000 },
            { status: 'CONNECTED', error: null, lastPing: Date.now() },
        ];

        for (const update of statusUpdates) {
            console.log(`\n=== Updating to ${update.status} ===`);

            // Update config
            configService.set('connect.mothership', update);

            // Call writeStatus directly
            await service['writeStatus']();

            const content = await readFile(testFilePath, 'utf-8');
            const data = JSON.parse(content);
            console.log(`Status file shows: ${data.connectionStatus}`);
            expect(data.connectionStatus).toBe(update.status);
        }
    });

    it('should handle case where config is not set before event', async () => {
        await service.onApplicationBootstrap();
        await new Promise(resolve => setTimeout(resolve, 50));

        // Delete the config
        delete configStore['connect.mothership'];

        // Call writeStatus without config
        console.log('\n=== Calling writeStatus with no config ===');
        await service['writeStatus']();

        const content = await readFile(testFilePath, 'utf-8');
        const data = JSON.parse(content);
        console.log('Status with no config:', data);
        expect(data.connectionStatus).toBe('PRE_INIT');

        // Now set config and call writeStatus again
        console.log('\n=== Setting config and calling writeStatus ===');
        configService.set('connect.mothership', {
            status: 'CONNECTED',
            error: null,
            lastPing: Date.now(),
        });
        await service['writeStatus']();

        const content2 = await readFile(testFilePath, 'utf-8');
        const data2 = JSON.parse(content2);
        console.log('Status after setting config:', data2);
        expect(data2.connectionStatus).toBe('CONNECTED');
    });

    describe('cleanup on shutdown', () => {
        it('should delete status file on module destroy', async () => {
            await service.onApplicationBootstrap();
            await new Promise(resolve => setTimeout(resolve, 50));

            // Verify file exists
            await expect(access(testFilePath, constants.F_OK)).resolves.not.toThrow();

            // Cleanup
            await service.onModuleDestroy();

            // Verify file is deleted
            await expect(access(testFilePath, constants.F_OK)).rejects.toThrow();
        });

        it('should handle cleanup when file does not exist', async () => {
            // Don't bootstrap (so no file is written)
            await expect(service.onModuleDestroy()).resolves.not.toThrow();
        });
    });
});
@@ -0,0 +1,167 @@
import { ConfigService } from '@nestjs/config';
import { access, constants, mkdir, readFile, rm } from 'fs/promises';
import { join } from 'path';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { ConfigType } from '../config/connect.config.js';
import { ConnectStatusWriterService } from './connect-status-writer.service.js';

describe('ConnectStatusWriterService Integration', () => {
    let service: ConnectStatusWriterService;
    let configService: ConfigService<ConfigType, true>;
    const testDir = '/tmp/connect-status-test';
    const testFilePath = join(testDir, 'connectStatus.json');

    beforeEach(async () => {
        vi.clearAllMocks();

        // Create test directory
        await mkdir(testDir, { recursive: true });

        configService = {
            get: vi.fn().mockImplementation((key: string) => {
                console.log(`ConfigService.get called with key: ${key}`);
                return {
                    status: 'CONNECTED',
                    error: null,
                    lastPing: Date.now(),
                };
            }),
        } as unknown as ConfigService<ConfigType, true>;

        service = new ConnectStatusWriterService(configService);

        // Override the status file path to use our test location
        Object.defineProperty(service, 'statusFilePath', {
            get: () => testFilePath,
        });
    });

    afterEach(async () => {
        await service.onModuleDestroy();
        await rm(testDir, { recursive: true, force: true });
    });

    it('should write initial PRE_INIT status, then update on event', async () => {
        // First, mock the config to return undefined (no connection metadata)
        vi.mocked(configService.get).mockReturnValue(undefined);

        console.log('=== Starting onApplicationBootstrap ===');
        await service.onApplicationBootstrap();

        // Wait a bit for the initial write to complete
        await new Promise(resolve => setTimeout(resolve, 50));

        // Read initial status
        const initialContent = await readFile(testFilePath, 'utf-8');
        const initialData = JSON.parse(initialContent);
        console.log('Initial status written:', initialData);

        expect(initialData.connectionStatus).toBe('PRE_INIT');
        expect(initialData.error).toBeNull();
        expect(initialData.lastPing).toBeNull();

        // Now update the mock to return CONNECTED status
        vi.mocked(configService.get).mockReturnValue({
            status: 'CONNECTED',
            error: null,
            lastPing: 1234567890,
        });

        console.log('=== Calling writeStatus directly ===');
        await service['writeStatus']();

        // Read updated status
        const updatedContent = await readFile(testFilePath, 'utf-8');
        const updatedData = JSON.parse(updatedContent);
        console.log('Updated status after writeStatus:', updatedData);

        expect(updatedData.connectionStatus).toBe('CONNECTED');
        expect(updatedData.lastPing).toBe(1234567890);
    });

    it('should handle rapid status changes correctly', async () => {
        const statusChanges = [
            { status: 'PRE_INIT', error: null, lastPing: null },
            { status: 'CONNECTING', error: null, lastPing: null },
            { status: 'CONNECTED', error: null, lastPing: Date.now() },
            { status: 'DISCONNECTED', error: 'Connection lost', lastPing: Date.now() - 5000 },
            { status: 'CONNECTED', error: null, lastPing: Date.now() },
        ];

        let changeIndex = 0;
        vi.mocked(configService.get).mockImplementation(() => {
            const change = statusChanges[changeIndex];
            console.log(`Returning status ${changeIndex}: ${change.status}`);
            return change;
        });

        await service.onApplicationBootstrap();
        await new Promise(resolve => setTimeout(resolve, 50));

        // Simulate the final status change
        changeIndex = statusChanges.length - 1;
        console.log(`=== Calling writeStatus for final status: ${statusChanges[changeIndex].status} ===`);
        await service['writeStatus']();

        // Read final status
        const finalContent = await readFile(testFilePath, 'utf-8');
        const finalData = JSON.parse(finalContent);
        console.log('Final status after status change:', finalData);

        // Should have the last status
        expect(finalData.connectionStatus).toBe('CONNECTED');
        expect(finalData.error).toBeNull();
    });

    it('should handle multiple write calls correctly', async () => {
        const writes: number[] = [];
        const originalWriteStatus = service['writeStatus'].bind(service);

        service['writeStatus'] = async function () {
            const timestamp = Date.now();
            writes.push(timestamp);
            console.log(`writeStatus called at ${timestamp}`);
            return originalWriteStatus();
        };

        await service.onApplicationBootstrap();
        await new Promise(resolve => setTimeout(resolve, 50));

        const initialWrites = writes.length;
        console.log(`Initial writes: ${initialWrites}`);

        // Make multiple write calls
        for (let i = 0; i < 3; i++) {
            console.log(`Calling writeStatus ${i}`);
            await service['writeStatus']();
        }

        console.log(`Total writes: ${writes.length}`);
        console.log('Write timestamps:', writes);

        // Should have initial write + 3 additional writes
        expect(writes.length).toBe(initialWrites + 3);
    });

    describe('cleanup on shutdown', () => {
        it('should delete status file on module destroy', async () => {
            await service.onApplicationBootstrap();
            await new Promise(resolve => setTimeout(resolve, 50));

            // Verify file exists
            await expect(access(testFilePath, constants.F_OK)).resolves.not.toThrow();

            // Cleanup
            await service.onModuleDestroy();

            // Verify file is deleted
            await expect(access(testFilePath, constants.F_OK)).rejects.toThrow();
        });

        it('should handle cleanup gracefully when file does not exist', async () => {
            // Don't bootstrap (so no file is created)
            await expect(service.onModuleDestroy()).resolves.not.toThrow();
        });
    });
});
@@ -0,0 +1,140 @@
import { ConfigService } from '@nestjs/config';
import { unlink, writeFile } from 'fs/promises';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { ConfigType } from '../config/connect.config.js';
import { ConnectStatusWriterService } from './connect-status-writer.service.js';

vi.mock('fs/promises', () => ({
    writeFile: vi.fn(),
    unlink: vi.fn(),
}));

describe('ConnectStatusWriterService', () => {
    let service: ConnectStatusWriterService;
    let configService: ConfigService<ConfigType, true>;
    let writeFileMock: ReturnType<typeof vi.fn>;
    let unlinkMock: ReturnType<typeof vi.fn>;

    beforeEach(async () => {
        vi.clearAllMocks();
        vi.useFakeTimers();

        writeFileMock = vi.mocked(writeFile);
        unlinkMock = vi.mocked(unlink);

        configService = {
            get: vi.fn().mockReturnValue({
                status: 'CONNECTED',
                error: null,
                lastPing: Date.now(),
            }),
        } as unknown as ConfigService<ConfigType, true>;

        service = new ConnectStatusWriterService(configService);
    });

    afterEach(async () => {
        vi.useRealTimers();
    });

    describe('onApplicationBootstrap', () => {
        it('should write initial status on bootstrap', async () => {
            await service.onApplicationBootstrap();

            expect(writeFileMock).toHaveBeenCalledTimes(1);
            expect(writeFileMock).toHaveBeenCalledWith(
                '/var/local/emhttp/connectStatus.json',
                expect.stringContaining('CONNECTED')
            );
        });

        it('should handle event-driven status changes', async () => {
            await service.onApplicationBootstrap();
            writeFileMock.mockClear();

            // The service uses the @OnEvent decorator, so we need to call the method directly
            await service['writeStatus']();

            expect(writeFileMock).toHaveBeenCalledTimes(1);
        });
    });

    describe('write content', () => {
        it('should write correct JSON structure with all fields', async () => {
            const mockMetadata = {
                status: 'CONNECTED',
                error: 'Some error',
                lastPing: 1234567890,
            };

            vi.mocked(configService.get).mockReturnValue(mockMetadata);

            await service.onApplicationBootstrap();

            const writeCall = writeFileMock.mock.calls[0];
            const writtenData = JSON.parse(writeCall[1] as string);

            expect(writtenData).toMatchObject({
                connectionStatus: 'CONNECTED',
                error: 'Some error',
                lastPing: 1234567890,
                allowedOrigins: '',
            });
            expect(writtenData.timestamp).toBeDefined();
            expect(typeof writtenData.timestamp).toBe('number');
        });

        it('should handle missing connection metadata', async () => {
            vi.mocked(configService.get).mockReturnValue(undefined);

            await service.onApplicationBootstrap();

            const writeCall = writeFileMock.mock.calls[0];
            const writtenData = JSON.parse(writeCall[1] as string);

            expect(writtenData).toMatchObject({
                connectionStatus: 'PRE_INIT',
                error: null,
                lastPing: null,
                allowedOrigins: '',
            });
        });
    });

    describe('error handling', () => {
        it('should handle write errors gracefully', async () => {
            writeFileMock.mockRejectedValue(new Error('Write failed'));

            await expect(service.onApplicationBootstrap()).resolves.not.toThrow();

            // Test direct write error handling
            await expect(service['writeStatus']()).resolves.not.toThrow();
        });
    });

    describe('cleanup on shutdown', () => {
        it('should delete status file on module destroy', async () => {
            await service.onModuleDestroy();

            expect(unlinkMock).toHaveBeenCalledTimes(1);
            expect(unlinkMock).toHaveBeenCalledWith('/var/local/emhttp/connectStatus.json');
        });

        it('should handle file deletion errors gracefully', async () => {
            unlinkMock.mockRejectedValue(new Error('File not found'));

            await expect(service.onModuleDestroy()).resolves.not.toThrow();

            expect(unlinkMock).toHaveBeenCalledTimes(1);
        });

        it('should ensure file is deleted even if it was never written', async () => {
            // Don't bootstrap (so no file is written)
            await service.onModuleDestroy();

            expect(unlinkMock).toHaveBeenCalledTimes(1);
            expect(unlinkMock).toHaveBeenCalledWith('/var/local/emhttp/connectStatus.json');
        });
    });
});
@@ -0,0 +1,69 @@
import { Injectable, Logger, OnApplicationBootstrap, OnModuleDestroy } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { OnEvent } from '@nestjs/event-emitter';
import { unlink } from 'fs/promises';
import { writeFile } from 'fs/promises';

import { ConfigType, ConnectionMetadata } from '../config/connect.config.js';
import { EVENTS } from '../helper/nest-tokens.js';

@Injectable()
export class ConnectStatusWriterService implements OnApplicationBootstrap, OnModuleDestroy {
    constructor(private readonly configService: ConfigService<ConfigType, true>) {}

    private logger = new Logger(ConnectStatusWriterService.name);

    get statusFilePath() {
        // Write to /var/local/emhttp/connectStatus.json so PHP can read it
        return '/var/local/emhttp/connectStatus.json';
    }

    async onApplicationBootstrap() {
        this.logger.verbose(`Status file path: ${this.statusFilePath}`);

        // Write initial status
        await this.writeStatus();
    }

    async onModuleDestroy() {
        try {
            await unlink(this.statusFilePath);
            this.logger.verbose(`Status file deleted: ${this.statusFilePath}`);
        } catch (error) {
            this.logger.debug(`Could not delete status file: ${error}`);
        }
    }

    @OnEvent(EVENTS.MOTHERSHIP_CONNECTION_STATUS_CHANGED, { async: true })
    private async writeStatus() {
        try {
            const connectionMetadata = this.configService.get<ConnectionMetadata>('connect.mothership');

            // Try to get allowed origins from the store
            let allowedOrigins = '';
            try {
                // We can't import from @app here, so we'll skip allowed origins for now
                // This can be added later if needed
                allowedOrigins = '';
            } catch (error) {
                this.logger.debug('Could not get allowed origins:', error);
            }

            const statusData = {
                connectionStatus: connectionMetadata?.status || 'PRE_INIT',
                error: connectionMetadata?.error || null,
                lastPing: connectionMetadata?.lastPing || null,
                allowedOrigins: allowedOrigins,
                timestamp: Date.now(),
            };

            const data = JSON.stringify(statusData, null, 2);
            this.logger.verbose(`Writing connection status: ${data}`);

            await writeFile(this.statusFilePath, data);
            this.logger.verbose(`Status written to ${this.statusFilePath}`);
        } catch (error) {
            this.logger.error(error, `Error writing status to '${this.statusFilePath}'`);
        }
    }
}
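Putting the pieces together: the `@OnEvent` binding means any component with the shared EventEmitter2 can trigger a rewrite, and the file on disk is a small JSON document whose shape follows `statusData` above. A sketch (the concrete values are illustrative):

```ts
import { EventEmitter2 } from '@nestjs/event-emitter';

import { EVENTS } from '../helper/nest-tokens.js';

// Any injected emitter can force a rewrite of the status file:
declare const eventEmitter: EventEmitter2;
eventEmitter.emit(EVENTS.MOTHERSHIP_CONNECTION_STATUS_CHANGED);

// Approximate contents of /var/local/emhttp/connectStatus.json afterwards
// ('PRE_INIT' appears until connection metadata exists):
const exampleStatus = {
    connectionStatus: 'CONNECTED',
    error: null,
    lastPing: 1752326314433,
    allowedOrigins: '', // currently always empty, per the comment in writeStatus()
    timestamp: 1752326314557,
};
```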
@@ -32,7 +32,7 @@ export class MothershipHandler {
         const state = this.connectionService.getConnectionState();
         if (
             state &&
-            [MinigraphStatus.PING_FAILURE, MinigraphStatus.ERROR_RETRYING].includes(state.status)
+            [MinigraphStatus.PING_FAILURE].includes(state.status)
         ) {
             this.logger.verbose(
                 'Mothership connection status changed to %s; setting up mothership subscription',
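The narrowing here matches the behavioral tests added earlier in this diff: PING_FAILURE means the transport is silently dead and warrants an immediate `initOrRestart()`, while ERROR_RETRYING means the GraphQL client is already applying its own exponential backoff, so forcing a restart on top of it would undercut that (the 'DDoS Prevention' cases). Restated as a small predicate (helper name illustrative, not part of the diff):

```ts
import { MinigraphStatus } from '../config/connect.config.js';

// Only a ping failure should force a reconnect; every other status is either
// healthy or already covered by the client's own retry/backoff handling.
function shouldForceReconnect(status: MinigraphStatus): boolean {
    return status === MinigraphStatus.PING_FAILURE;
}
```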
@@ -3,18 +3,20 @@ import { Module } from '@nestjs/common';

 import { ConnectApiKeyService } from '../authn/connect-api-key.service.js';
 import { CloudResolver } from '../connection-status/cloud.resolver.js';
 import { CloudService } from '../connection-status/cloud.service.js';
+import { ConnectStatusWriterService } from '../connection-status/connect-status-writer.service.js';
 import { TimeoutCheckerJob } from '../connection-status/timeout-checker.job.js';
 import { InternalClientService } from '../internal-rpc/internal.client.js';
 import { RemoteAccessModule } from '../remote-access/remote-access.module.js';
 import { MothershipConnectionService } from './connection.service.js';
 import { MothershipGraphqlClientService } from './graphql.client.js';
 import { MothershipSubscriptionHandler } from './mothership-subscription.handler.js';
-import { MothershipHandler } from './mothership.events.js';
 import { MothershipController } from './mothership.controller.js';
+import { MothershipHandler } from './mothership.events.js';

 @Module({
     imports: [RemoteAccessModule],
     providers: [
+        ConnectStatusWriterService,
         ConnectApiKeyService,
         MothershipConnectionService,
         MothershipGraphqlClientService,
@@ -3,11 +3,11 @@ import { EventEmitter2 } from '@nestjs/event-emitter';
 import { Args, Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql';

 import { type Layout } from '@jsonforms/core';
-import { GraphQLJSON } from 'graphql-scalars';
 import { Resource } from '@unraid/shared/graphql.model.js';
 import { DataSlice } from '@unraid/shared/jsonforms/settings.js';
 import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
 import { UsePermissions } from '@unraid/shared/use-permissions.directive.js';
+import { GraphQLJSON } from 'graphql-scalars';
 import { AuthActionVerb, AuthPossession } from 'nest-authz';

 import { EVENTS } from '../helper/nest-tokens.js';
@@ -15,7 +15,7 @@
     "commander": "14.0.0",
     "create-create-app": "7.3.0",
     "fs-extra": "11.3.0",
-    "inquirer": "12.6.3",
+    "inquirer": "12.7.0",
     "validate-npm-package-name": "6.0.1"
   },
   "devDependencies": {
@@ -25,7 +25,7 @@
     "@nestjs/graphql": "13.1.0",
     "@types/fs-extra": "11.0.4",
     "@types/inquirer": "9.0.8",
-    "@types/node": "22.15.32",
+    "@types/node": "22.16.3",
     "@types/validate-npm-package-name": "4.0.2",
     "class-transformer": "0.5.1",
     "class-validator": "0.14.2",
@@ -31,9 +31,9 @@
     "@jsonforms/core": "3.6.0",
     "@nestjs/common": "11.1.3",
     "@nestjs/graphql": "13.1.0",
-    "@types/bun": "1.2.16",
+    "@types/bun": "1.2.18",
     "@types/lodash-es": "4.17.12",
-    "@types/node": "22.15.32",
+    "@types/node": "22.16.3",
     "class-validator": "0.14.2",
     "graphql": "16.11.0",
     "graphql-scalars": "1.24.2",
@@ -10,6 +10,7 @@ export enum Resource {
     ACTIVATION_CODE = 'ACTIVATION_CODE',
     API_KEY = 'API_KEY',
     ARRAY = 'ARRAY',
+    BACKUP = 'BACKUP',
     CLOUD = 'CLOUD',
     CONFIG = 'CONFIG',
     CONNECT = 'CONNECT',
@@ -10,6 +10,7 @@ import { cleanupTxzFiles } from "./utils/cleanup";
 import { apiDir } from "./utils/paths";
 import { getVendorBundleName, getVendorFullPath } from "./build-vendor-store";
 import { getAssetUrl } from "./utils/bucket-urls";
+import { ensureRclone } from "./utils/rclone-helper";


 // Recursively search for manifest files
@@ -1,17 +1,17 @@
 {
   "name": "@unraid/connect-plugin",
-  "version": "4.9.1",
+  "version": "4.9.5",
   "private": true,
   "dependencies": {
     "commander": "14.0.0",
     "conventional-changelog": "6.0.0",
     "date-fns": "4.1.0",
-    "glob": "11.0.1",
+    "glob": "11.0.3",
     "html-sloppy-escaper": "0.1.0",
-    "semver": "7.7.1",
-    "tsx": "4.19.3",
-    "zod": "3.24.2",
-    "zx": "8.3.2"
+    "semver": "7.7.2",
+    "tsx": "4.20.3",
+    "zod": "3.25.76",
+    "zx": "8.6.2"
   },
   "type": "module",
   "license": "GPL-2.0-or-later",
@@ -37,7 +37,7 @@
   "devDependencies": {
     "http-server": "14.1.1",
     "nodemon": "3.1.10",
-    "vitest": "3.0.7"
+    "vitest": "3.2.4"
   },
-  "packageManager": "pnpm@10.12.4"
+  "packageManager": "pnpm@10.13.1"
 }
@@ -138,6 +138,34 @@ exit 0
 </INLINE>
 </FILE>

+<FILE Run="/bin/bash" Method="install">
+<INLINE>
+<![CDATA[
+echo "Patching header logo if necessary..."
+
+# We do this here instead of via an API FileModification to avoid an undesirable
+# rollback when the API is stopped.
+#
+# This is necessary on < 7.2 because the unraid-header-os-version web component
+# that ships with the base OS only displays the version, not the logo as well.
+#
+# Rolling back in this case (i.e. when stopping the API) yields a duplicate logo
+# that blocks interaction with the navigation menu.
+
+# Remove the old header logo from DefaultPageLayout.php if present
+if [ -f "/usr/local/emhttp/plugins/dynamix/include/DefaultPageLayout.php" ]; then
+  sed -i 's|<a href="https://unraid.net" target="_blank"><?readfile("$docroot/webGui/images/UN-logotype-gradient.svg")?></a>||g' "/usr/local/emhttp/plugins/dynamix/include/DefaultPageLayout.php"
+
+  # Add unraid-modals element if not already present
+  if ! grep -q '<unraid-modals>' "/usr/local/emhttp/plugins/dynamix/include/DefaultPageLayout.php"; then
+    sed -i 's|<body>|<body>\n<unraid-modals></unraid-modals>|' "/usr/local/emhttp/plugins/dynamix/include/DefaultPageLayout.php"
+  fi
+fi
+
+]]>
+</INLINE>
+</FILE>

 <FILE Run="/bin/bash" Method="remove">
 <INLINE>
 MAINNAME="&name;"
@@ -23,9 +23,16 @@ $myservers_flash_cfg_path='/boot/config/plugins/dynamix.my.servers/myservers.cfg
 $myservers = file_exists($myservers_flash_cfg_path) ? @parse_ini_file($myservers_flash_cfg_path,true) : [];
 $isRegistered = !empty($myservers['remote']['username']);

-$myservers_memory_cfg_path ='/var/local/emhttp/myservers.cfg';
-$mystatus = (file_exists($myservers_memory_cfg_path)) ? @parse_ini_file($myservers_memory_cfg_path) : [];
-$isConnected = (($mystatus['minigraph']??'')==='CONNECTED') ? true : false;
+// Read connection status from the new API status file
+$statusFilePath = '/var/local/emhttp/connectStatus.json';
+$connectionStatus = '';
+
+if (file_exists($statusFilePath)) {
+    $statusData = @json_decode(file_get_contents($statusFilePath), true);
+    $connectionStatus = $statusData['connectionStatus'] ?? '';
+}
+
+$isConnected = ($connectionStatus === 'CONNECTED') ? true : false;

 $flashbackup_ini = '/var/local/emhttp/flashbackup.ini';
@@ -168,9 +168,8 @@ class ServerState
     private function getMyServersCfgValues()
     {
         /**
-         * @todo can we read this from somewhere other than the flash? Connect page uses this path and /boot/config/plugins/dynamix.my.servers/myservers.cfg…
-         * - $myservers_memory_cfg_path ='/var/local/emhttp/myservers.cfg';
-         * - $mystatus = (file_exists($myservers_memory_cfg_path)) ? @parse_ini_file($myservers_memory_cfg_path) : [];
+         * Memory config is now written by the new API to /usr/local/emhttp/state/myservers.cfg
+         * This contains runtime state including connection status.
          */
        $flashCfgPath = '/boot/config/plugins/dynamix.my.servers/myservers.cfg';
        $this->myServersFlashCfg = file_exists($flashCfgPath) ? @parse_ini_file($flashCfgPath, true) : [];
@@ -212,11 +211,19 @@ class ServerState
         * Include localhost in the test, but only display HTTP(S) URLs that do not include localhost.
         */
        $this->host = $_SERVER['HTTP_HOST'] ?? "unknown";
-       $memoryCfgPath = '/var/local/emhttp/myservers.cfg';
-       $this->myServersMemoryCfg = (file_exists($memoryCfgPath)) ? @parse_ini_file($memoryCfgPath) : [];
-       $this->myServersMiniGraphConnected = (($this->myServersMemoryCfg['minigraph'] ?? '') === 'CONNECTED');
+       // Read connection status and allowed origins from the new API status file
+       $statusFilePath = '/var/local/emhttp/connectStatus.json';
+       $connectionStatus = '';
+       $allowedOrigins = '';
+
+       if (file_exists($statusFilePath)) {
+           $statusData = @json_decode(file_get_contents($statusFilePath), true);
+           $connectionStatus = $statusData['connectionStatus'] ?? '';
+           $allowedOrigins = $statusData['allowedOrigins'] ?? '';
+       }
+
+       $this->myServersMiniGraphConnected = ($connectionStatus === 'CONNECTED');

-       $allowedOrigins = $this->myServersMemoryCfg['allowedOrigins'] ?? "";
        $extraOrigins = $this->myServersFlashCfg['api']['extraOrigins'] ?? "";
        $combinedOrigins = $allowedOrigins . "," . $extraOrigins; // combine the two strings for easier searching
        $combinedOrigins = str_replace(" ", "", $combinedOrigins); // replace any spaces with nothing
3086  pnpm-lock.yaml  (generated; diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.