mirror of
https://github.com/unraid/api.git
synced 2026-01-06 00:30:22 -06:00
Compare commits
70 Commits
4.25.2-bui
...
renovate/m
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
85b947dc40 | ||
|
|
9ef1cf1eca | ||
|
|
a0745e15ca | ||
|
|
c39b0b267c | ||
|
|
73135b8328 | ||
|
|
e42d619b6d | ||
|
|
560db880cc | ||
|
|
d6055f102b | ||
|
|
d099e7521d | ||
|
|
bb9b539732 | ||
|
|
0e44e73bf7 | ||
|
|
277ac42046 | ||
|
|
e1e3ea7eb6 | ||
|
|
8b155d1f1c | ||
|
|
d13a1f6174 | ||
|
|
e243ae836e | ||
|
|
01a63fd86b | ||
|
|
df78608457 | ||
|
|
ca3bee4ad5 | ||
|
|
024ae69343 | ||
|
|
99ce88bfdc | ||
|
|
73b2ce360c | ||
|
|
d6e29395c8 | ||
|
|
317e0fa307 | ||
|
|
331c913329 | ||
|
|
abf3461348 | ||
|
|
079a09ec90 | ||
|
|
e4223ab5a1 | ||
|
|
6f54206a4a | ||
|
|
e35bcc72f1 | ||
|
|
74df938e45 | ||
|
|
51f025b105 | ||
|
|
23a71207dd | ||
|
|
832e9d04f2 | ||
|
|
31af99e52f | ||
|
|
933cefa020 | ||
|
|
375dcd0598 | ||
|
|
64875edbba | ||
|
|
330e81a484 | ||
|
|
b8f0fdf8d2 | ||
|
|
36c104915e | ||
|
|
dc9a036c73 | ||
|
|
c71b0487ad | ||
|
|
e7340431a5 | ||
|
|
e4a9b8291b | ||
|
|
6b6b78fa2e | ||
|
|
e2fdf6cadb | ||
|
|
3d4f193fa4 | ||
|
|
b28ef1ea33 | ||
|
|
ee0f240233 | ||
|
|
3aacaa1fb5 | ||
|
|
0cd4c0ae16 | ||
|
|
66625ded6a | ||
|
|
f8a6785e9c | ||
|
|
d7aca81c60 | ||
|
|
854b403fbd | ||
|
|
c264a1843c | ||
|
|
45cda4af80 | ||
|
|
64eb9ce9b5 | ||
|
|
d56797c59f | ||
|
|
92af3b6115 | ||
|
|
35f8bc2258 | ||
|
|
c4cd0c6352 | ||
|
|
818e7ce997 | ||
|
|
7e13202aa1 | ||
|
|
d18eaf2364 | ||
|
|
42406e795d | ||
|
|
11d2de5d08 | ||
|
|
031c1ab5dc | ||
|
|
34075e44c5 |
@@ -241,4 +241,3 @@ const pinia = createTestingPinia({
|
||||
- Set initial state for focused testing
|
||||
- Test computed properties by accessing them directly
|
||||
- Verify state changes by updating the store
|
||||
|
||||
|
||||
27
.github/workflows/build-artifacts.yml
vendored
27
.github/workflows/build-artifacts.yml
vendored
@@ -32,13 +32,13 @@ jobs:
|
||||
name: Build API
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build_number: ${{ steps.buildnumber.outputs.build_number }}
|
||||
build_number: ${{ steps.buildnumber.outputs.build_number || steps.fallback_buildnumber.outputs.build_number }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: api
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.ref || github.ref }}
|
||||
fetch-depth: 0
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
@@ -81,18 +81,25 @@ jobs:
|
||||
|
||||
- name: Generate build number
|
||||
id: buildnumber
|
||||
if: github.repository == 'unraid/api'
|
||||
continue-on-error: true
|
||||
uses: onyxmueller/build-tag-number@v1
|
||||
with:
|
||||
token: ${{ secrets.UNRAID_BOT_GITHUB_ADMIN_TOKEN || github.token }}
|
||||
prefix: ${{ inputs.version_override || steps.vars.outputs.PACKAGE_LOCK_VERSION }}
|
||||
|
||||
- name: Generate fallback build number
|
||||
id: fallback_buildnumber
|
||||
if: steps.buildnumber.outcome != 'success'
|
||||
run: echo "build_number=${GITHUB_RUN_NUMBER}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
pnpm run build:release
|
||||
tar -czf deploy/unraid-api.tgz -C deploy/pack/ .
|
||||
|
||||
- name: Upload tgz to Github artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: unraid-api
|
||||
path: ${{ github.workspace }}/api/deploy/unraid-api.tgz
|
||||
@@ -105,7 +112,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.ref || github.ref }}
|
||||
|
||||
@@ -115,7 +122,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
@@ -138,7 +145,7 @@ jobs:
|
||||
run: pnpm run build:wc
|
||||
|
||||
- name: Upload Artifact to Github
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: unraid-wc-ui
|
||||
path: unraid-ui/dist-wc/
|
||||
@@ -151,7 +158,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.ref || github.ref }}
|
||||
|
||||
@@ -169,7 +176,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
@@ -194,7 +201,7 @@ jobs:
|
||||
run: pnpm run build
|
||||
|
||||
- name: Upload build to Github artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: unraid-wc-rich
|
||||
path: web/dist
|
||||
|
||||
28
.github/workflows/build-plugin.yml
vendored
28
.github/workflows/build-plugin.yml
vendored
@@ -56,7 +56,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.ref }}
|
||||
fetch-depth: 0
|
||||
@@ -67,7 +67,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
@@ -78,7 +78,21 @@ jobs:
|
||||
GIT_SHA=$(git rev-parse --short HEAD)
|
||||
IS_TAGGED=$(git describe --tags --abbrev=0 --exact-match || echo '')
|
||||
PACKAGE_LOCK_VERSION=$(jq -r '.version' package.json)
|
||||
API_VERSION=$([[ -n "$IS_TAGGED" ]] && echo "$PACKAGE_LOCK_VERSION" || echo "${PACKAGE_LOCK_VERSION}+${GIT_SHA}")
|
||||
|
||||
# For release builds, trust the release tag version to avoid stale checkouts
|
||||
if [ "${{ inputs.RELEASE_CREATED }}" = "true" ] && [ -n "${{ inputs.RELEASE_TAG }}" ]; then
|
||||
TAG_VERSION="${{ inputs.RELEASE_TAG }}"
|
||||
TAG_VERSION="${TAG_VERSION#v}" # trim leading v if present
|
||||
|
||||
if [ "$TAG_VERSION" != "$PACKAGE_LOCK_VERSION" ]; then
|
||||
echo "::warning::Release tag version ($TAG_VERSION) does not match package.json version ($PACKAGE_LOCK_VERSION). Using tag version for TXZ naming."
|
||||
fi
|
||||
|
||||
API_VERSION="$TAG_VERSION"
|
||||
else
|
||||
API_VERSION=$([[ -n "$IS_TAGGED" ]] && echo "$PACKAGE_LOCK_VERSION" || echo "${PACKAGE_LOCK_VERSION}+${GIT_SHA}")
|
||||
fi
|
||||
|
||||
echo "API_VERSION=${API_VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install dependencies
|
||||
@@ -87,19 +101,19 @@ jobs:
|
||||
pnpm install --frozen-lockfile --filter @unraid/connect-plugin
|
||||
|
||||
- name: Download Unraid UI Components
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: unraid-wc-ui
|
||||
path: ${{ github.workspace }}/plugin/source/dynamix.unraid.net/usr/local/emhttp/plugins/dynamix.my.servers/unraid-components/uui
|
||||
merge-multiple: true
|
||||
- name: Download Unraid Web Components
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
pattern: unraid-wc-rich
|
||||
path: ${{ github.workspace }}/plugin/source/dynamix.unraid.net/usr/local/emhttp/plugins/dynamix.my.servers/unraid-components/standalone
|
||||
merge-multiple: true
|
||||
- name: Download Unraid API
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: unraid-api
|
||||
path: ${{ github.workspace }}/plugin/api/
|
||||
@@ -128,7 +142,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Upload to GHA
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: unraid-plugin-${{ github.run_id }}-${{ inputs.RELEASE_TAG }}
|
||||
path: plugin/deploy/
|
||||
|
||||
8
.github/workflows/codeql-analysis.yml
vendored
8
.github/workflows/codeql-analysis.yml
vendored
@@ -24,17 +24,17 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v3
|
||||
uses: github/codeql-action/init@v4
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/codeql-config.yml
|
||||
queries: +security-and-quality
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v3
|
||||
uses: github/codeql-action/autobuild@v4
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3
|
||||
uses: github/codeql-action/analyze@v4
|
||||
4
.github/workflows/deploy-storybook.yml
vendored
4
.github/workflows/deploy-storybook.yml
vendored
@@ -20,7 +20,7 @@ jobs:
|
||||
name: Deploy Storybook
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
name: Install pnpm
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
|
||||
4
.github/workflows/generate-release-notes.yml
vendored
4
.github/workflows/generate-release-notes.yml
vendored
@@ -31,14 +31,14 @@ jobs:
|
||||
release_notes: ${{ steps.generate_notes.outputs.release_notes }}
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.target_commitish || github.ref }}
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.UNRAID_BOT_GITHUB_ADMIN_TOKEN }}
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
|
||||
8
.github/workflows/main.yml
vendored
8
.github/workflows/main.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
run_install: false
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@v5
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: 'pnpm'
|
||||
@@ -177,7 +177,9 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- id: release
|
||||
uses: googleapis/release-please-action@v4
|
||||
|
||||
6
.github/workflows/manual-release.yml
vendored
6
.github/workflows/manual-release.yml
vendored
@@ -31,14 +31,14 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.target_commitish || github.ref }}
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.UNRAID_BOT_GITHUB_ADMIN_TOKEN }}
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
@@ -167,7 +167,7 @@ jobs:
|
||||
release_notes: ${{ needs.generate-release-notes.outputs.release_notes }}
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ inputs.target_commitish || github.ref }}
|
||||
fetch-depth: 0
|
||||
|
||||
30
.github/workflows/publish-schema.yml
vendored
Normal file
30
.github/workflows/publish-schema.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Publish GraphQL Schema
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'api/generated-schema.graphql'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
publish-schema:
|
||||
name: Publish Schema to Apollo Studio
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Install Apollo Rover CLI
|
||||
run: |
|
||||
curl -sSL https://rover.apollo.dev/nix/latest | sh
|
||||
echo "$HOME/.rover/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Publish schema to Apollo Studio
|
||||
env:
|
||||
APOLLO_KEY: ${{ secrets.APOLLO_KEY }}
|
||||
run: |
|
||||
rover graph publish Unraid-API@current \
|
||||
--schema api/generated-schema.graphql
|
||||
|
||||
2
.github/workflows/release-production.yml
vendored
2
.github/workflows/release-production.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
||||
with:
|
||||
latest: true
|
||||
prerelease: false
|
||||
- uses: actions/setup-node@v5
|
||||
- uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: 22.19.0
|
||||
- run: |
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -123,3 +123,6 @@ api/dev/Unraid.net/myservers.cfg
|
||||
# local Mise settings
|
||||
.mise.toml
|
||||
|
||||
# Compiled test pages (generated from Nunjucks templates)
|
||||
web/public/test-pages/*.html
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
{".":"4.25.2"}
|
||||
{".":"4.29.2"}
|
||||
|
||||
@@ -63,15 +63,6 @@
|
||||
*/
|
||||
|
||||
.unapi {
|
||||
--color-alpha: #1c1b1b;
|
||||
--color-beta: #f2f2f2;
|
||||
--color-gamma: #999999;
|
||||
--color-gamma-opaque: rgba(153, 153, 153, 0.5);
|
||||
--color-customgradient-start: rgba(242, 242, 242, 0);
|
||||
--color-customgradient-end: rgba(242, 242, 242, 0.85);
|
||||
--shadow-beta: 0 25px 50px -12px rgba(242, 242, 242, 0.15);
|
||||
--ring-offset-shadow: 0 0 var(--color-beta);
|
||||
--ring-shadow: 0 0 var(--color-beta);
|
||||
}
|
||||
|
||||
.unapi button:not(:disabled),
|
||||
|
||||
@@ -5,88 +5,64 @@
|
||||
*/
|
||||
|
||||
/* Default/White Theme */
|
||||
:root,
|
||||
.theme-white {
|
||||
--header-text-primary: #ffffff;
|
||||
--header-text-secondary: #999999;
|
||||
--header-background-color: #1c1b1b;
|
||||
--header-gradient-start: rgba(28, 27, 27, 0);
|
||||
--header-gradient-end: rgba(28, 27, 27, 0.7);
|
||||
.Theme--white {
|
||||
--color-border: #383735;
|
||||
--color-alpha: #ff8c2f;
|
||||
--color-beta: #1c1b1b;
|
||||
--color-gamma: #ffffff;
|
||||
--color-gamma-opaque: rgba(255, 255, 255, 0.3);
|
||||
--color-header-gradient-start: color-mix(in srgb, var(--header-background-color) 0%, transparent);
|
||||
--color-header-gradient-end: color-mix(in srgb, var(--header-background-color) 100%, transparent);
|
||||
--shadow-beta: 0 25px 50px -12px color-mix(in srgb, var(--color-beta) 15%, transparent);
|
||||
--ring-offset-shadow: 0 0 var(--color-beta);
|
||||
--ring-shadow: 0 0 var(--color-beta);
|
||||
}
|
||||
|
||||
/* Black Theme */
|
||||
.theme-black,
|
||||
.theme-black.dark {
|
||||
--header-text-primary: #1c1b1b;
|
||||
--header-text-secondary: #999999;
|
||||
--header-background-color: #f2f2f2;
|
||||
--header-gradient-start: rgba(242, 242, 242, 0);
|
||||
--header-gradient-end: rgba(242, 242, 242, 0.7);
|
||||
.Theme--black,
|
||||
.Theme--black.dark {
|
||||
--color-border: #e0e0e0;
|
||||
--color-alpha: #ff8c2f;
|
||||
--color-beta: #f2f2f2;
|
||||
--color-gamma: #1c1b1b;
|
||||
--color-gamma-opaque: rgba(28, 27, 27, 0.3);
|
||||
--color-header-gradient-start: color-mix(in srgb, var(--header-background-color) 0%, transparent);
|
||||
--color-header-gradient-end: color-mix(in srgb, var(--header-background-color) 100%, transparent);
|
||||
--shadow-beta: 0 25px 50px -12px color-mix(in srgb, var(--color-beta) 15%, transparent);
|
||||
--ring-offset-shadow: 0 0 var(--color-beta);
|
||||
--ring-shadow: 0 0 var(--color-beta);
|
||||
}
|
||||
|
||||
/* Gray Theme */
|
||||
.theme-gray {
|
||||
--header-text-primary: #ffffff;
|
||||
--header-text-secondary: #999999;
|
||||
--header-background-color: #1c1b1b;
|
||||
--header-gradient-start: rgba(28, 27, 27, 0);
|
||||
--header-gradient-end: rgba(28, 27, 27, 0.7);
|
||||
.Theme--gray,
|
||||
.Theme--gray.dark {
|
||||
--color-border: #383735;
|
||||
--color-alpha: #ff8c2f;
|
||||
--color-beta: #383735;
|
||||
--color-gamma: #ffffff;
|
||||
--color-gamma-opaque: rgba(255, 255, 255, 0.3);
|
||||
--color-header-gradient-start: color-mix(in srgb, var(--header-background-color) 0%, transparent);
|
||||
--color-header-gradient-end: color-mix(in srgb, var(--header-background-color) 100%, transparent);
|
||||
--shadow-beta: 0 25px 50px -12px color-mix(in srgb, var(--color-beta) 15%, transparent);
|
||||
--ring-offset-shadow: 0 0 var(--color-beta);
|
||||
--ring-shadow: 0 0 var(--color-beta);
|
||||
}
|
||||
|
||||
/* Azure Theme */
|
||||
.theme-azure {
|
||||
--header-text-primary: #1c1b1b;
|
||||
--header-text-secondary: #999999;
|
||||
--header-background-color: #f2f2f2;
|
||||
--header-gradient-start: rgba(242, 242, 242, 0);
|
||||
--header-gradient-end: rgba(242, 242, 242, 0.7);
|
||||
.Theme--azure {
|
||||
--color-border: #5a8bb8;
|
||||
--color-alpha: #ff8c2f;
|
||||
--color-beta: #e7f2f8;
|
||||
--color-gamma: #336699;
|
||||
--color-gamma-opaque: rgba(51, 102, 153, 0.3);
|
||||
--color-header-gradient-start: color-mix(in srgb, var(--header-background-color) 0%, transparent);
|
||||
--color-header-gradient-end: color-mix(in srgb, var(--header-background-color) 100%, transparent);
|
||||
--shadow-beta: 0 25px 50px -12px color-mix(in srgb, var(--color-beta) 15%, transparent);
|
||||
--ring-offset-shadow: 0 0 var(--color-beta);
|
||||
--ring-shadow: 0 0 var(--color-beta);
|
||||
}
|
||||
|
||||
/* Dark Mode Overrides */
|
||||
.dark {
|
||||
--color-border: #383735;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dynamic color variables for user overrides from GraphQL
|
||||
* These are set via JavaScript and override the theme defaults
|
||||
* Using :root with class for higher specificity to override theme classes
|
||||
*/
|
||||
:root.has-custom-header-text {
|
||||
--header-text-primary: var(--custom-header-text-primary);
|
||||
--color-header-text-primary: var(--custom-header-text-primary);
|
||||
}
|
||||
|
||||
:root.has-custom-header-meta {
|
||||
--header-text-secondary: var(--custom-header-text-secondary);
|
||||
--color-header-text-secondary: var(--custom-header-text-secondary);
|
||||
}
|
||||
|
||||
:root.has-custom-header-bg {
|
||||
--header-background-color: var(--custom-header-background-color);
|
||||
--color-header-background: var(--custom-header-background-color);
|
||||
--header-gradient-start: var(--custom-header-gradient-start);
|
||||
--header-gradient-end: var(--custom-header-gradient-end);
|
||||
--color-header-gradient-start: var(--custom-header-gradient-start);
|
||||
--color-header-gradient-end: var(--custom-header-gradient-end);
|
||||
}
|
||||
@@ -19,6 +19,7 @@ PATHS_LOGS_FILE=./dev/log/graphql-api.log
|
||||
PATHS_CONNECT_STATUS_FILE_PATH=./dev/connectStatus.json # Connect plugin status file
|
||||
PATHS_OIDC_JSON=./dev/configs/oidc.local.json
|
||||
PATHS_LOCAL_SESSION_FILE=./dev/local-session
|
||||
PATHS_DOCKER_TEMPLATES=./dev/docker-templates
|
||||
ENVIRONMENT="development"
|
||||
NODE_ENV="development"
|
||||
PORT="3001"
|
||||
@@ -32,3 +33,4 @@ CHOKIDAR_USEPOLLING=true
|
||||
LOG_TRANSPORT=console
|
||||
LOG_LEVEL=trace
|
||||
ENABLE_NEXT_DOCKER_RELEASE=true
|
||||
SKIP_CONNECT_PLUGIN_CHECK=true
|
||||
|
||||
@@ -3,3 +3,4 @@ NODE_ENV="production"
|
||||
PORT="/var/run/unraid-api.sock"
|
||||
MOTHERSHIP_GRAPHQL_LINK="https://mothership.unraid.net/ws"
|
||||
PATHS_CONFIG_MODULES="/boot/config/plugins/dynamix.my.servers/configs"
|
||||
ENABLE_NEXT_DOCKER_RELEASE=true
|
||||
|
||||
@@ -3,3 +3,4 @@ NODE_ENV="production"
|
||||
PORT="/var/run/unraid-api.sock"
|
||||
MOTHERSHIP_GRAPHQL_LINK="https://staging.mothership.unraid.net/ws"
|
||||
PATHS_CONFIG_MODULES="/boot/config/plugins/dynamix.my.servers/configs"
|
||||
ENABLE_NEXT_DOCKER_RELEASE=true
|
||||
|
||||
@@ -8,7 +8,7 @@ export default tseslint.config(
|
||||
eslint.configs.recommended,
|
||||
...tseslint.configs.recommended,
|
||||
{
|
||||
ignores: ['src/graphql/generated/client/**/*', 'src/**/**/dummy-process.js'],
|
||||
ignores: ['src/graphql/generated/client/**/*', 'src/**/**/dummy-process.js', 'dist/**/*'],
|
||||
},
|
||||
{
|
||||
plugins: {
|
||||
|
||||
6
api/.gitignore
vendored
6
api/.gitignore
vendored
@@ -83,6 +83,8 @@ deploy/*
|
||||
|
||||
!**/*.login.*
|
||||
|
||||
# Local Development Artifacts
|
||||
|
||||
# local api configs - don't need project-wide tracking
|
||||
dev/connectStatus.json
|
||||
dev/configs/*
|
||||
@@ -96,3 +98,7 @@ dev/configs/oidc.local.json
|
||||
|
||||
# local api keys
|
||||
dev/keys/*
|
||||
# mock docker templates
|
||||
dev/docker-templates
|
||||
# ie unraid notifications
|
||||
dev/notifications
|
||||
@@ -5,3 +5,4 @@ src/unraid-api/unraid-file-modifier/modifications/__fixtures__/downloaded/*
|
||||
|
||||
# Generated Types
|
||||
src/graphql/generated/client/*.ts
|
||||
dist/
|
||||
|
||||
129
api/CHANGELOG.md
129
api/CHANGELOG.md
@@ -1,5 +1,134 @@
|
||||
# Changelog
|
||||
|
||||
## [4.29.2](https://github.com/unraid/api/compare/v4.29.1...v4.29.2) (2025-12-19)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* unraid-connect plugin not loaded when connect is installed ([#1856](https://github.com/unraid/api/issues/1856)) ([73135b8](https://github.com/unraid/api/commit/73135b832801f5c76d60020161492e4770958c3d))
|
||||
|
||||
## [4.29.1](https://github.com/unraid/api/compare/v4.29.0...v4.29.1) (2025-12-19)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* revert replace docker overview table with web component (7.3+) ([#1853](https://github.com/unraid/api/issues/1853)) ([560db88](https://github.com/unraid/api/commit/560db880cc138324f9ff8753f7209b683a84c045))
|
||||
|
||||
## [4.29.0](https://github.com/unraid/api/compare/v4.28.2...v4.29.0) (2025-12-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* replace docker overview table with web component (7.3+) ([#1764](https://github.com/unraid/api/issues/1764)) ([277ac42](https://github.com/unraid/api/commit/277ac420464379e7ee6739c4530271caf7717503))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* handle race condition between guid loading and license check ([#1847](https://github.com/unraid/api/issues/1847)) ([8b155d1](https://github.com/unraid/api/commit/8b155d1f1c99bb19efbc9614e000d852e9f0c12d))
|
||||
* resolve issue with "Continue" button when updating ([#1852](https://github.com/unraid/api/issues/1852)) ([d099e75](https://github.com/unraid/api/commit/d099e7521d2062bb9cf84f340e46b169dd2492c5))
|
||||
* update myservers config references to connect config references ([#1810](https://github.com/unraid/api/issues/1810)) ([e1e3ea7](https://github.com/unraid/api/commit/e1e3ea7eb68cc6840f67a8aec937fd3740e75b28))
|
||||
|
||||
## [4.28.2](https://github.com/unraid/api/compare/v4.28.1...v4.28.2) (2025-12-16)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **api:** timeout on startup on 7.0 and 6.12 ([#1844](https://github.com/unraid/api/issues/1844)) ([e243ae8](https://github.com/unraid/api/commit/e243ae836ec1a7fde37dceeb106cc693b20ec82b))
|
||||
|
||||
## [4.28.1](https://github.com/unraid/api/compare/v4.28.0...v4.28.1) (2025-12-16)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* empty commit to release as 4.28.1 ([df78608](https://github.com/unraid/api/commit/df786084572eefb82e086c15939b50cc08b9db10))
|
||||
|
||||
## [4.28.0](https://github.com/unraid/api/compare/v4.27.2...v4.28.0) (2025-12-15)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* when cancelling OS upgrade, delete any plugin files that were d… ([#1823](https://github.com/unraid/api/issues/1823)) ([74df938](https://github.com/unraid/api/commit/74df938e450def2ee3e2864d4b928f53a68e9eb8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* change keyfile watcher to poll instead of inotify on FAT32 ([#1820](https://github.com/unraid/api/issues/1820)) ([23a7120](https://github.com/unraid/api/commit/23a71207ddde221867562b722f4e65a5fc4dd744))
|
||||
* enhance dark mode support in theme handling ([#1808](https://github.com/unraid/api/issues/1808)) ([d6e2939](https://github.com/unraid/api/commit/d6e29395c8a8b0215d4f5945775de7fa358d06ec))
|
||||
* improve API startup reliability with timeout budget tracking ([#1824](https://github.com/unraid/api/issues/1824)) ([51f025b](https://github.com/unraid/api/commit/51f025b105487b178048afaabf46b260c4a7f9c1))
|
||||
* PHP Warnings in Management Settings ([#1805](https://github.com/unraid/api/issues/1805)) ([832e9d0](https://github.com/unraid/api/commit/832e9d04f207d3ec612c98500a2ffc86659264e5))
|
||||
* **plg:** explicitly stop an existing api before installation ([#1841](https://github.com/unraid/api/issues/1841)) ([99ce88b](https://github.com/unraid/api/commit/99ce88bfdc0a7f020c42f2fe0c6a0f4e32ac8f5a))
|
||||
* update @unraid/shared-callbacks to version 3.0.0 ([#1831](https://github.com/unraid/api/issues/1831)) ([73b2ce3](https://github.com/unraid/api/commit/73b2ce360c66cd9bedc138a5f8306af04b6bde77))
|
||||
* **ups:** convert estimatedRuntime from minutes to seconds ([#1822](https://github.com/unraid/api/issues/1822)) ([024ae69](https://github.com/unraid/api/commit/024ae69343bad5a3cbc19f80e357082e9b2efc1e))
|
||||
|
||||
## [4.27.2](https://github.com/unraid/api/compare/v4.27.1...v4.27.2) (2025-11-21)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* issue with header flashing + issue with trial date ([64875ed](https://github.com/unraid/api/commit/64875edbba786a0d1ba0113c9e9a3d38594eafcc))
|
||||
|
||||
## [4.27.1](https://github.com/unraid/api/compare/v4.27.0...v4.27.1) (2025-11-21)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* missing translations for expiring trials ([#1800](https://github.com/unraid/api/issues/1800)) ([36c1049](https://github.com/unraid/api/commit/36c104915ece203a3cac9e1a13e0c325e536a839))
|
||||
* resolve header flash when background color is set ([#1796](https://github.com/unraid/api/issues/1796)) ([dc9a036](https://github.com/unraid/api/commit/dc9a036c73d8ba110029364e0d044dc24c7d0dfa))
|
||||
|
||||
## [4.27.0](https://github.com/unraid/api/compare/v4.26.2...v4.27.0) (2025-11-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* remove Unraid API log download functionality ([#1793](https://github.com/unraid/api/issues/1793)) ([e4a9b82](https://github.com/unraid/api/commit/e4a9b8291b049752a9ff59b17ff50cf464fe0535))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* auto-uninstallation of connect api plugin ([#1791](https://github.com/unraid/api/issues/1791)) ([e734043](https://github.com/unraid/api/commit/e7340431a58821ec1b4f5d1b452fba6613b01fa5))
|
||||
|
||||
## [4.26.2](https://github.com/unraid/api/compare/v4.26.1...v4.26.2) (2025-11-19)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **theme:** Missing header background color ([e2fdf6c](https://github.com/unraid/api/commit/e2fdf6cadbd816559b8c82546c2bc771a81ffa9e))
|
||||
|
||||
## [4.26.1](https://github.com/unraid/api/compare/v4.26.0...v4.26.1) (2025-11-18)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **theme:** update theme class naming and scoping logic ([b28ef1e](https://github.com/unraid/api/commit/b28ef1ea334cb4842f01fa992effa7024185c6c9))
|
||||
|
||||
## [4.26.0](https://github.com/unraid/api/compare/v4.25.3...v4.26.0) (2025-11-17)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add cpu power query & subscription ([#1745](https://github.com/unraid/api/issues/1745)) ([d7aca81](https://github.com/unraid/api/commit/d7aca81c60281bfa47fb9113929c1ead6ed3361b))
|
||||
* add schema publishing to apollo studio ([#1772](https://github.com/unraid/api/issues/1772)) ([7e13202](https://github.com/unraid/api/commit/7e13202aa1c02803095bb72bb1bcb2472716f53a))
|
||||
* add workflow_dispatch trigger to schema publishing workflow ([818e7ce](https://github.com/unraid/api/commit/818e7ce997059663e07efcf1dab706bf0d7fc9da))
|
||||
* apollo studio readme link ([c4cd0c6](https://github.com/unraid/api/commit/c4cd0c63520deec15d735255f38811f0360fe3a1))
|
||||
* **cli:** make `unraid-api plugins remove` scriptable ([#1774](https://github.com/unraid/api/issues/1774)) ([64eb9ce](https://github.com/unraid/api/commit/64eb9ce9b5d1ff4fb1f08d9963522c5d32221ba7))
|
||||
* use persisted theme css to fix flashes on header ([#1784](https://github.com/unraid/api/issues/1784)) ([854b403](https://github.com/unraid/api/commit/854b403fbd85220a3012af58ce033cf0b8418516))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **api:** decode html entities before parsing notifications ([#1768](https://github.com/unraid/api/issues/1768)) ([42406e7](https://github.com/unraid/api/commit/42406e795da1e5b95622951a467722dde72d51a8))
|
||||
* **connect:** disable api plugin if unraid plugin is absent ([#1773](https://github.com/unraid/api/issues/1773)) ([c264a18](https://github.com/unraid/api/commit/c264a1843cf115e8cc1add1ab4f12fdcc932405a))
|
||||
* detection of flash backup activation state ([#1769](https://github.com/unraid/api/issues/1769)) ([d18eaf2](https://github.com/unraid/api/commit/d18eaf2364e0c04992c52af38679ff0a0c570440))
|
||||
* re-add missing header gradient styles ([#1787](https://github.com/unraid/api/issues/1787)) ([f8a6785](https://github.com/unraid/api/commit/f8a6785e9c92f81acaef76ac5eb78a4a769e69da))
|
||||
* respect OS safe mode in plugin loader ([#1775](https://github.com/unraid/api/issues/1775)) ([92af3b6](https://github.com/unraid/api/commit/92af3b61156cabae70368cf5222a2f7ac5b4d083))
|
||||
|
||||
## [4.25.3](https://github.com/unraid/unraid-api/compare/v4.25.2...v4.25.3) (2025-10-22)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* flaky watch on boot drive's dynamix config ([ec7aa06](https://github.com/unraid/unraid-api/commit/ec7aa06d4a5fb1f0e84420266b0b0d7ee09a3663))
|
||||
|
||||
## [4.25.2](https://github.com/unraid/api/compare/v4.25.1...v4.25.2) (2025-09-30)
|
||||
|
||||
|
||||
|
||||
@@ -75,6 +75,16 @@ If you found this file you're likely a developer. If you'd like to know more abo
|
||||
|
||||
- Run `pnpm --filter @unraid/api i18n:extract` to scan the Nest.js source for translation helper usages and update `src/i18n/en.json` with any new keys. The extractor keeps existing translations intact and appends new keys with their English source text.
|
||||
|
||||
## Developer Documentation
|
||||
|
||||
For detailed information about specific features:
|
||||
|
||||
- [API Plugins](docs/developer/api-plugins.md) - Working with API plugins and workspace packages
|
||||
- [Docker Feature](docs/developer/docker.md) - Container management, GraphQL API, and WebGUI integration
|
||||
- [Feature Flags](docs/developer/feature-flags.md) - Conditionally enabling functionality
|
||||
- [Repository Organization](docs/developer/repo-organization.md) - Codebase structure
|
||||
- [Development Workflows](docs/developer/workflows.md) - Development processes
|
||||
|
||||
## License
|
||||
|
||||
Copyright Lime Technology Inc. All rights reserved.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"version": "4.25.2",
|
||||
"version": "4.29.2",
|
||||
"extraOrigins": [],
|
||||
"sandbox": true,
|
||||
"ssoSubIds": [],
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
timestamp=1730937600
|
||||
event=Hashtag Test
|
||||
subject=Warning [UNRAID] - #1 OS is cooking
|
||||
description=Disk 1 temperature has reached #epic # levels of proportion
|
||||
importance=warning
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
timestamp=1730937600
|
||||
event=Temperature Test
|
||||
subject=Warning [UNRAID] - High disk temperature detected: 45 °C
|
||||
description=Disk 1 temperature has reached 45 °C (threshold: 40 °C)<br><br>Current temperatures:<br>Parity - 32 °C [OK]<br>Disk 1 - 45 °C [WARNING]<br>Disk 2 - 38 °C [OK]<br>Cache - 28 °C [OK]<br><br>Please check cooling system.
|
||||
importance=warning
|
||||
|
||||
555
api/docs/developer/docker.md
Normal file
555
api/docs/developer/docker.md
Normal file
@@ -0,0 +1,555 @@
|
||||
# Docker Feature
|
||||
|
||||
The Docker feature provides complete container management for Unraid through a GraphQL API, including lifecycle operations, real-time monitoring, update detection, and organizational tools.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Overview](#overview)
|
||||
- [Architecture](#architecture)
|
||||
- [Module Structure](#module-structure)
|
||||
- [Data Flow](#data-flow)
|
||||
- [Core Services](#core-services)
|
||||
- [DockerService](#dockerservice)
|
||||
- [DockerNetworkService](#dockernetworkservice)
|
||||
- [DockerPortService](#dockerportservice)
|
||||
- [DockerLogService](#dockerlogservice)
|
||||
- [DockerStatsService](#dockerstatsservice)
|
||||
- [DockerAutostartService](#dockerautostartservice)
|
||||
- [DockerConfigService](#dockerconfigservice)
|
||||
- [DockerManifestService](#dockermanifestservice)
|
||||
- [DockerPhpService](#dockerphpservice)
|
||||
- [DockerTailscaleService](#dockertailscaleservice)
|
||||
- [DockerTemplateScannerService](#dockertemplatescannerservice)
|
||||
- [DockerOrganizerService](#dockerorganizerservice)
|
||||
- [GraphQL API](#graphql-api)
|
||||
- [Queries](#queries)
|
||||
- [Mutations](#mutations)
|
||||
- [Subscriptions](#subscriptions)
|
||||
- [Data Models](#data-models)
|
||||
- [DockerContainer](#dockercontainer)
|
||||
- [ContainerState](#containerstate)
|
||||
- [ContainerPort](#containerport)
|
||||
- [DockerPortConflicts](#dockerportconflicts)
|
||||
- [Caching Strategy](#caching-strategy)
|
||||
- [WebGUI Integration](#webgui-integration)
|
||||
- [File Modification](#file-modification)
|
||||
- [PHP Integration](#php-integration)
|
||||
- [Permissions](#permissions)
|
||||
- [Configuration Files](#configuration-files)
|
||||
- [Development](#development)
|
||||
- [Adding a New Docker Service](#adding-a-new-docker-service)
|
||||
- [Testing](#testing)
|
||||
- [Feature Flag Testing](#feature-flag-testing)
|
||||
|
||||
## Overview
|
||||
|
||||
**Location:** `src/unraid-api/graph/resolvers/docker/`
|
||||
|
||||
**Feature Flag:** Many next-generation features are gated behind `ENABLE_NEXT_DOCKER_RELEASE`. See [Feature Flags](./feature-flags.md) for details on enabling.
|
||||
|
||||
**Key Capabilities:**
|
||||
|
||||
- Container lifecycle management (start, stop, pause, update, remove)
|
||||
- Real-time container stats streaming
|
||||
- Network and port conflict detection
|
||||
- Container log retrieval
|
||||
- Automatic update detection via digest comparison
|
||||
- Tailscale container integration
|
||||
- Container organization with folders and views
|
||||
- Template-based metadata resolution
|
||||
|
||||
## Architecture
|
||||
|
||||
### Module Structure
|
||||
|
||||
The Docker module (`docker.module.ts`) serves as the entry point and exports:
|
||||
|
||||
- **13 services** for various Docker operations
|
||||
- **3 resolvers** for GraphQL query/mutation/subscription handling
|
||||
|
||||
**Dependencies:**
|
||||
|
||||
- `JobModule` - Background job scheduling
|
||||
- `NotificationsModule` - User notifications
|
||||
- `ServicesModule` - Shared service utilities
|
||||
|
||||
### Data Flow
|
||||
|
||||
```text
|
||||
Docker Daemon (Unix Socket)
|
||||
↓
|
||||
dockerode library
|
||||
↓
|
||||
DockerService (transform & cache)
|
||||
↓
|
||||
GraphQL Resolvers
|
||||
↓
|
||||
Client Applications
|
||||
```
|
||||
|
||||
The API communicates with the Docker daemon through the `dockerode` library via Unix socket. Container data is transformed from raw Docker API format to GraphQL types, enriched with Unraid-specific metadata (templates, autostart config), and cached for performance.
|
||||
|
||||
## Core Services
|
||||
|
||||
### DockerService
|
||||
|
||||
**File:** `docker.service.ts`
|
||||
|
||||
Central orchestrator for all container operations.
|
||||
|
||||
**Key Methods:**
|
||||
|
||||
- `getContainers(skipCache?, includeSize?)` - List containers with caching
|
||||
- `start(id)`, `stop(id)`, `pause(id)`, `unpause(id)` - Lifecycle operations
|
||||
- `updateContainer(id)`, `updateContainers(ids)`, `updateAllContainers()` - Image updates
|
||||
- `removeContainer(id, withImage?)` - Remove container and optionally its image
|
||||
|
||||
**Caching:**
|
||||
|
||||
- Cache TTL: 60 seconds (60000ms)
|
||||
- Cache keys: `docker_containers`, `docker_containers_with_size`
|
||||
- Invalidated automatically on mutations
|
||||
|
||||
### DockerNetworkService
|
||||
|
||||
**File:** `docker-network.service.ts`
|
||||
|
||||
Lists Docker networks with metadata including driver, scope, IPAM settings, and connected containers.
|
||||
|
||||
**Caching:** 60 seconds
|
||||
|
||||
### DockerPortService
|
||||
|
||||
**File:** `docker-port.service.ts`
|
||||
|
||||
Detects port conflicts between containers and with the host.
|
||||
|
||||
**Features:**
|
||||
|
||||
- Deduplicates port mappings from Docker API
|
||||
- Identifies container-to-container conflicts
|
||||
- Detects host-level port collisions
|
||||
- Separates TCP and UDP conflicts
|
||||
- Calculates LAN-accessible IP:port combinations
|
||||
|
||||
### DockerLogService
|
||||
|
||||
**File:** `docker-log.service.ts`
|
||||
|
||||
Retrieves container logs with configurable options.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `tail` - Number of lines (default: 200, max: 2000)
|
||||
- `since` - Timestamp filter for log entries
|
||||
|
||||
**Additional Features:**
|
||||
|
||||
- Calculates container log file sizes
|
||||
- Supports timestamp-based filtering
|
||||
|
||||
### DockerStatsService
|
||||
|
||||
**File:** `docker-stats.service.ts`
|
||||
|
||||
Provides real-time container statistics via GraphQL subscription.
|
||||
|
||||
**Metrics:**
|
||||
|
||||
- CPU percentage
|
||||
- Memory usage and limit
|
||||
- Network I/O (received/transmitted bytes)
|
||||
- Block I/O (read/written bytes)
|
||||
|
||||
**Implementation:**
|
||||
|
||||
- Spawns `docker stats` process with streaming output
|
||||
- Publishes to `PUBSUB_CHANNEL.DOCKER_STATS`
|
||||
- Auto-starts on first subscriber, stops when last disconnects
|
||||
|
||||
### DockerAutostartService
|
||||
|
||||
**File:** `docker-autostart.service.ts`
|
||||
|
||||
Manages container auto-start configuration.
|
||||
|
||||
**Features:**
|
||||
|
||||
- Parses auto-start file format (name + wait time per line)
|
||||
- Maintains auto-start order and wait times
|
||||
- Persists configuration changes
|
||||
- Tracks container primary names
|
||||
|
||||
### DockerConfigService
|
||||
|
||||
**File:** `docker-config.service.ts`
|
||||
|
||||
Persistent configuration management using `ConfigFilePersister`.
|
||||
|
||||
**Configuration Options:**
|
||||
|
||||
- `templateMappings` - Container name to template file path mappings
|
||||
- `skipTemplatePaths` - Containers excluded from template scanning
|
||||
- `updateCheckCronSchedule` - Cron expression for digest refresh (default: daily at 6am)
|
||||
|
||||
### DockerManifestService
|
||||
|
||||
**File:** `docker-manifest.service.ts`
|
||||
|
||||
Detects available container image updates.
|
||||
|
||||
**Implementation:**
|
||||
|
||||
- Compares local and remote image SHA256 digests
|
||||
- Reads cached status from `/var/lib/docker/unraid-update-status.json`
|
||||
- Triggers refresh via PHP integration
|
||||
|
||||
### DockerPhpService
|
||||
|
||||
**File:** `docker-php.service.ts`
|
||||
|
||||
Integration with legacy Unraid PHP Docker scripts.
|
||||
|
||||
**PHP Scripts Used:**
|
||||
|
||||
- `DockerUpdate.php` - Refresh container digests
|
||||
- `DockerContainers.php` - Get update statuses
|
||||
|
||||
**Update Statuses:**
|
||||
|
||||
- `UP_TO_DATE` - Container is current
|
||||
- `UPDATE_AVAILABLE` - New image available
|
||||
- `REBUILD_READY` - Rebuild required
|
||||
- `UNKNOWN` - Status could not be determined
|
||||
|
||||
### DockerTailscaleService
|
||||
|
||||
**File:** `docker-tailscale.service.ts`
|
||||
|
||||
Detects and monitors Tailscale-enabled containers.
|
||||
|
||||
**Detection Methods:**
|
||||
|
||||
- Container labels indicating Tailscale
|
||||
- Tailscale socket mount points
|
||||
|
||||
**Status Information:**
|
||||
|
||||
- Tailscale version and backend state
|
||||
- Hostname and DNS name
|
||||
- Exit node status
|
||||
- Key expiry dates
|
||||
|
||||
**Caching:**
|
||||
|
||||
- Status cache: 30 seconds
|
||||
- DERP map and versions: 24 hours
|
||||
|
||||
### DockerTemplateScannerService
|
||||
|
||||
**File:** `docker-template-scanner.service.ts`
|
||||
|
||||
Maps containers to their template files for metadata resolution.
|
||||
|
||||
**Bootstrap Process:**
|
||||
|
||||
1. Runs 5 seconds after app startup
|
||||
2. Scans XML templates from configured paths
|
||||
3. Parses container/image names from XML
|
||||
4. Matches against running containers
|
||||
5. Stores mappings in `docker.config.json`
|
||||
|
||||
**Template Metadata Resolved:**
|
||||
|
||||
- `projectUrl`, `registryUrl`, `supportUrl`
|
||||
- `iconUrl`, `webUiUrl`, `shell`
|
||||
- Template port mappings
|
||||
|
||||
**Orphaned Containers:**
|
||||
|
||||
Containers without matching templates are marked as "orphaned" in the API response.
|
||||
|
||||
### DockerOrganizerService
|
||||
|
||||
**File:** `organizer/docker-organizer.service.ts`
|
||||
|
||||
Container organization system for UI views.
|
||||
|
||||
**Features:**
|
||||
|
||||
- Hierarchical folder structure
|
||||
- Multiple views with different layouts
|
||||
- Position-based organization
|
||||
- View-specific preferences (sorting, filtering)
|
||||
|
||||
## GraphQL API
|
||||
|
||||
### Queries
|
||||
|
||||
```graphql
|
||||
type Query {
|
||||
docker: Docker!
|
||||
}
|
||||
|
||||
type Docker {
|
||||
containers(skipCache: Boolean): [DockerContainer!]!
|
||||
container(id: PrefixedID!): DockerContainer # Feature-flagged
|
||||
networks(skipCache: Boolean): [DockerNetwork!]!
|
||||
portConflicts(skipCache: Boolean): DockerPortConflicts!
|
||||
logs(id: PrefixedID!, since: Int, tail: Int): DockerContainerLogs!
|
||||
organizer(skipCache: Boolean): DockerOrganizer! # Feature-flagged
|
||||
containerUpdateStatuses: [ContainerUpdateStatus!]! # Feature-flagged
|
||||
}
|
||||
```
|
||||
|
||||
### Mutations
|
||||
|
||||
**Container Lifecycle:**
|
||||
|
||||
```graphql
|
||||
type Mutation {
|
||||
start(id: PrefixedID!): DockerContainer!
|
||||
stop(id: PrefixedID!): DockerContainer!
|
||||
pause(id: PrefixedID!): DockerContainer!
|
||||
unpause(id: PrefixedID!): DockerContainer!
|
||||
removeContainer(id: PrefixedID!, withImage: Boolean): Boolean!
|
||||
}
|
||||
```
|
||||
|
||||
**Container Updates:**
|
||||
|
||||
```graphql
|
||||
type Mutation {
|
||||
updateContainer(id: PrefixedID!): DockerContainer!
|
||||
updateContainers(ids: [PrefixedID!]!): [DockerContainer!]!
|
||||
updateAllContainers: [DockerContainer!]!
|
||||
refreshDockerDigests: Boolean!
|
||||
}
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
|
||||
```graphql
|
||||
type Mutation {
|
||||
updateAutostartConfiguration(
|
||||
entries: [AutostartEntry!]!
|
||||
persistUserPreferences: Boolean
|
||||
): Boolean!
|
||||
syncDockerTemplatePaths: Boolean!
|
||||
resetDockerTemplateMappings: Boolean!
|
||||
}
|
||||
```
|
||||
|
||||
**Organizer (Feature-flagged):**
|
||||
|
||||
```graphql
|
||||
type Mutation {
|
||||
createDockerFolder(name: String!, parentId: ID, childrenIds: [ID!]): DockerFolder!
|
||||
createDockerFolderWithItems(
|
||||
name: String!
|
||||
parentId: ID
|
||||
sourceEntryIds: [ID!]
|
||||
position: Int
|
||||
): DockerFolder!
|
||||
setDockerFolderChildren(folderId: ID!, childrenIds: [ID!]!): DockerFolder!
|
||||
deleteDockerEntries(entryIds: [ID!]!): Boolean!
|
||||
moveDockerEntriesToFolder(sourceEntryIds: [ID!]!, destinationFolderId: ID!): Boolean!
|
||||
moveDockerItemsToPosition(
|
||||
sourceEntryIds: [ID!]!
|
||||
destinationFolderId: ID!
|
||||
position: Int!
|
||||
): Boolean!
|
||||
renameDockerFolder(folderId: ID!, newName: String!): DockerFolder!
|
||||
updateDockerViewPreferences(viewId: ID!, prefs: ViewPreferencesInput!): Boolean!
|
||||
}
|
||||
```
|
||||
|
||||
### Subscriptions
|
||||
|
||||
```graphql
|
||||
type Subscription {
|
||||
dockerContainerStats: DockerContainerStats!
|
||||
}
|
||||
```
|
||||
|
||||
Real-time container statistics stream. Automatically starts when first client subscribes and stops when last client disconnects.
|
||||
|
||||
## Data Models
|
||||
|
||||
### DockerContainer
|
||||
|
||||
Primary container representation with 24+ fields:
|
||||
|
||||
```typescript
|
||||
{
|
||||
id: PrefixedID
|
||||
names: [String!]!
|
||||
image: String!
|
||||
imageId: String!
|
||||
state: ContainerState!
|
||||
status: String!
|
||||
created: Float!
|
||||
|
||||
// Networking
|
||||
ports: [ContainerPort!]!
|
||||
lanIpPorts: [ContainerPort!]!
|
||||
hostConfig: ContainerHostConfig
|
||||
networkSettings: DockerNetworkSettings
|
||||
|
||||
// Storage
|
||||
sizeRootFs: Float
|
||||
sizeRw: Float
|
||||
sizeLog: Float
|
||||
mounts: [ContainerMount!]!
|
||||
|
||||
// Metadata
|
||||
labels: JSON
|
||||
|
||||
// Auto-start
|
||||
autoStart: Boolean!
|
||||
autoStartOrder: Int
|
||||
autoStartWait: Int
|
||||
|
||||
// Template Integration
|
||||
templatePath: String
|
||||
isOrphaned: Boolean!
|
||||
projectUrl: String
|
||||
registryUrl: String
|
||||
supportUrl: String
|
||||
iconUrl: String
|
||||
webUiUrl: String
|
||||
shell: String
|
||||
templatePorts: [ContainerPort!]
|
||||
|
||||
// Tailscale
|
||||
tailscaleEnabled: Boolean!
|
||||
tailscaleStatus: TailscaleStatus
|
||||
|
||||
// Updates
|
||||
isUpdateAvailable: Boolean
|
||||
isRebuildReady: Boolean
|
||||
}
|
||||
```
|
||||
|
||||
### ContainerState
|
||||
|
||||
```typescript
|
||||
enum ContainerState {
|
||||
RUNNING
|
||||
PAUSED
|
||||
EXITED
|
||||
}
|
||||
```
|
||||
|
||||
### ContainerPort
|
||||
|
||||
```typescript
|
||||
{
|
||||
ip: String
|
||||
privatePort: Int!
|
||||
publicPort: Int
|
||||
type: String! // "tcp" or "udp"
|
||||
}
|
||||
```
|
||||
|
||||
### DockerPortConflicts
|
||||
|
||||
```typescript
|
||||
{
|
||||
containerConflicts: [DockerContainerPortConflict!]!
|
||||
lanConflicts: [DockerLanPortConflict!]!
|
||||
}
|
||||
```
|
||||
|
||||
## Caching Strategy
|
||||
|
||||
The Docker feature uses `cache-manager` v7 for performance optimization.
|
||||
|
||||
**Important:** cache-manager v7 expects TTL values in **milliseconds**, not seconds.
|
||||
|
||||
| Cache Key | TTL | Invalidation |
|
||||
|-----------|-----|--------------|
|
||||
| `docker_containers` | 60s | On any container mutation |
|
||||
| `docker_containers_with_size` | 60s | On any container mutation |
|
||||
| `docker_networks` | 60s | On network changes |
|
||||
| Tailscale status | 30s | Automatic |
|
||||
| Tailscale DERP/versions | 24h | Automatic |
|
||||
|
||||
**Cache Invalidation Triggers:**
|
||||
|
||||
- `start()`, `stop()`, `pause()`, `unpause()`
|
||||
- `updateContainer()`, `updateContainers()`, `updateAllContainers()`
|
||||
- `removeContainer()`
|
||||
- `updateAutostartConfiguration()`
|
||||
|
||||
## WebGUI Integration
|
||||
|
||||
### File Modification
|
||||
|
||||
**File:** `unraid-file-modifier/modifications/docker-containers-page.modification.ts`
|
||||
|
||||
**Target:** `/usr/local/emhttp/plugins/dynamix.docker.manager/DockerContainers.page`
|
||||
|
||||
When `ENABLE_NEXT_DOCKER_RELEASE` is enabled and Unraid version is 7.3.0+, the modification:
|
||||
|
||||
1. Replaces the legacy Docker containers page
|
||||
2. Injects the Vue web component: `<unraid-docker-container-overview>`
|
||||
3. Retains the `Nchan="docker_load"` page attribute (an emhttp/WebGUI feature for real-time updates, not controlled by the API)
|
||||
|
||||
### PHP Integration
|
||||
|
||||
The API integrates with legacy Unraid PHP scripts for certain operations:
|
||||
|
||||
- **Digest refresh:** Calls `DockerUpdate.php` to refresh container image digests
|
||||
- **Update status:** Reads from `DockerContainers.php` output
|
||||
|
||||
## Permissions
|
||||
|
||||
All Docker operations are protected with permission checks:
|
||||
|
||||
| Operation | Resource | Action |
|
||||
|-----------|----------|--------|
|
||||
| Read containers/networks | `Resource.DOCKER` | `AuthAction.READ_ANY` |
|
||||
| Start/stop/pause/update | `Resource.DOCKER` | `AuthAction.UPDATE_ANY` |
|
||||
| Remove containers | `Resource.DOCKER` | `AuthAction.DELETE_ANY` |
|
||||
|
||||
## Configuration Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `docker.config.json` | Template mappings, skip paths, cron schedule |
|
||||
| `docker.organizer.json` | Container organization tree and views |
|
||||
| `/var/lib/docker/unraid-update-status.json` | Cached container update statuses |
|
||||
|
||||
## Development
|
||||
|
||||
### Adding a New Docker Service
|
||||
|
||||
1. Create service file in `src/unraid-api/graph/resolvers/docker/`
|
||||
2. Add to `docker.module.ts` providers and exports
|
||||
3. Inject into resolvers as needed
|
||||
4. Add GraphQL types to `docker.model.ts` if needed
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
# Run Docker-related tests
|
||||
pnpm --filter ./api test -- src/unraid-api/graph/resolvers/docker/
|
||||
|
||||
# Run specific test file
|
||||
pnpm --filter ./api test -- src/unraid-api/graph/resolvers/docker/docker.service.spec.ts
|
||||
```
|
||||
|
||||
### Feature Flag Testing
|
||||
|
||||
To test next-generation Docker features locally:
|
||||
|
||||
```bash
|
||||
ENABLE_NEXT_DOCKER_RELEASE=true unraid-api start
|
||||
```
|
||||
|
||||
Or add to `.env`:
|
||||
|
||||
```env
|
||||
ENABLE_NEXT_DOCKER_RELEASE=true
|
||||
```
|
||||
@@ -62,15 +62,18 @@ To build all packages in the monorepo:
|
||||
pnpm build
|
||||
```
|
||||
|
||||
### Watch Mode Building
|
||||
### Plugin Building (Docker Required)
|
||||
|
||||
For continuous building during development:
|
||||
The plugin build requires Docker. This command automatically builds all dependencies (API, web) before starting Docker:
|
||||
|
||||
```bash
|
||||
pnpm build:watch
|
||||
cd plugin
|
||||
pnpm run docker:build-and-run
|
||||
# Then inside the container:
|
||||
pnpm build
|
||||
```
|
||||
|
||||
This is useful when you want to see your changes reflected without manually rebuilding. This will also allow you to install a local plugin to test your changes.
|
||||
This serves the plugin at `http://YOUR_IP:5858/` for installation on your Unraid server.
|
||||
|
||||
### Package-Specific Building
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"cwd": "/usr/local/unraid-api",
|
||||
"exec_mode": "fork",
|
||||
"wait_ready": true,
|
||||
"listen_timeout": 15000,
|
||||
"listen_timeout": 30000,
|
||||
"max_restarts": 10,
|
||||
"min_uptime": 10000,
|
||||
"watch": false,
|
||||
|
||||
@@ -862,6 +862,38 @@ type DockerMutations {
|
||||
|
||||
"""Stop a container"""
|
||||
stop(id: PrefixedID!): DockerContainer!
|
||||
|
||||
"""Pause (Suspend) a container"""
|
||||
pause(id: PrefixedID!): DockerContainer!
|
||||
|
||||
"""Unpause (Resume) a container"""
|
||||
unpause(id: PrefixedID!): DockerContainer!
|
||||
|
||||
"""Remove a container"""
|
||||
removeContainer(id: PrefixedID!, withImage: Boolean): Boolean!
|
||||
|
||||
"""Update auto-start configuration for Docker containers"""
|
||||
updateAutostartConfiguration(entries: [DockerAutostartEntryInput!]!, persistUserPreferences: Boolean): Boolean!
|
||||
|
||||
"""Update a container to the latest image"""
|
||||
updateContainer(id: PrefixedID!): DockerContainer!
|
||||
|
||||
"""Update multiple containers to the latest images"""
|
||||
updateContainers(ids: [PrefixedID!]!): [DockerContainer!]!
|
||||
|
||||
"""Update all containers that have available updates"""
|
||||
updateAllContainers: [DockerContainer!]!
|
||||
}
|
||||
|
||||
input DockerAutostartEntryInput {
|
||||
"""Docker container identifier"""
|
||||
id: PrefixedID!
|
||||
|
||||
"""Whether the container should auto-start"""
|
||||
autoStart: Boolean!
|
||||
|
||||
"""Number of seconds to wait after starting the container"""
|
||||
wait: Int
|
||||
}
|
||||
|
||||
type VmMutations {
|
||||
@@ -944,6 +976,23 @@ input UpdateApiKeyInput {
|
||||
permissions: [AddPermissionInput!]
|
||||
}
|
||||
|
||||
"""Customization related mutations"""
|
||||
type CustomizationMutations {
|
||||
"""Update the UI theme (writes dynamix.cfg)"""
|
||||
setTheme(
|
||||
"""Theme to apply"""
|
||||
theme: ThemeName!
|
||||
): Theme!
|
||||
}
|
||||
|
||||
"""The theme name"""
|
||||
enum ThemeName {
|
||||
azure
|
||||
black
|
||||
gray
|
||||
white
|
||||
}
|
||||
|
||||
"""
|
||||
Parity check related mutations, WIP, response types and functionaliy will change
|
||||
"""
|
||||
@@ -1042,14 +1091,6 @@ type Theme {
|
||||
headerSecondaryTextColor: String
|
||||
}
|
||||
|
||||
"""The theme name"""
|
||||
enum ThemeName {
|
||||
azure
|
||||
black
|
||||
gray
|
||||
white
|
||||
}
|
||||
|
||||
type ExplicitStatusItem {
|
||||
name: String!
|
||||
updateStatus: UpdateStatus!
|
||||
@@ -1080,6 +1121,29 @@ enum ContainerPortType {
|
||||
UDP
|
||||
}
|
||||
|
||||
type DockerPortConflictContainer {
|
||||
id: PrefixedID!
|
||||
name: String!
|
||||
}
|
||||
|
||||
type DockerContainerPortConflict {
|
||||
privatePort: Port!
|
||||
type: ContainerPortType!
|
||||
containers: [DockerPortConflictContainer!]!
|
||||
}
|
||||
|
||||
type DockerLanPortConflict {
|
||||
lanIpPort: String!
|
||||
publicPort: Port
|
||||
type: ContainerPortType!
|
||||
containers: [DockerPortConflictContainer!]!
|
||||
}
|
||||
|
||||
type DockerPortConflicts {
|
||||
containerPorts: [DockerContainerPortConflict!]!
|
||||
lanPorts: [DockerLanPortConflict!]!
|
||||
}
|
||||
|
||||
type ContainerHostConfig {
|
||||
networkMode: String!
|
||||
}
|
||||
@@ -1093,8 +1157,17 @@ type DockerContainer implements Node {
|
||||
created: Int!
|
||||
ports: [ContainerPort!]!
|
||||
|
||||
"""List of LAN-accessible host:port values"""
|
||||
lanIpPorts: [String!]
|
||||
|
||||
"""Total size of all files in the container (in bytes)"""
|
||||
sizeRootFs: BigInt
|
||||
|
||||
"""Size of writable layer (in bytes)"""
|
||||
sizeRw: BigInt
|
||||
|
||||
"""Size of container logs (in bytes)"""
|
||||
sizeLog: BigInt
|
||||
labels: JSON
|
||||
state: ContainerState!
|
||||
status: String!
|
||||
@@ -1102,12 +1175,50 @@ type DockerContainer implements Node {
|
||||
networkSettings: JSON
|
||||
mounts: [JSON!]
|
||||
autoStart: Boolean!
|
||||
|
||||
"""Zero-based order in the auto-start list"""
|
||||
autoStartOrder: Int
|
||||
|
||||
"""Wait time in seconds applied after start"""
|
||||
autoStartWait: Int
|
||||
templatePath: String
|
||||
|
||||
"""Project/Product homepage URL"""
|
||||
projectUrl: String
|
||||
|
||||
"""Registry/Docker Hub URL"""
|
||||
registryUrl: String
|
||||
|
||||
"""Support page/thread URL"""
|
||||
supportUrl: String
|
||||
|
||||
"""Icon URL"""
|
||||
iconUrl: String
|
||||
|
||||
"""Resolved WebUI URL from template"""
|
||||
webUiUrl: String
|
||||
|
||||
"""Shell to use for console access (from template)"""
|
||||
shell: String
|
||||
|
||||
"""Port mappings from template (used when container is not running)"""
|
||||
templatePorts: [ContainerPort!]
|
||||
|
||||
"""Whether the container is orphaned (no template found)"""
|
||||
isOrphaned: Boolean!
|
||||
isUpdateAvailable: Boolean
|
||||
isRebuildReady: Boolean
|
||||
|
||||
"""Whether Tailscale is enabled for this container"""
|
||||
tailscaleEnabled: Boolean!
|
||||
|
||||
"""Tailscale status for this container (fetched via docker exec)"""
|
||||
tailscaleStatus(forceRefresh: Boolean = false): TailscaleStatus
|
||||
}
|
||||
|
||||
enum ContainerState {
|
||||
RUNNING
|
||||
PAUSED
|
||||
EXITED
|
||||
}
|
||||
|
||||
@@ -1129,49 +1240,213 @@ type DockerNetwork implements Node {
|
||||
labels: JSON!
|
||||
}
|
||||
|
||||
type DockerContainerLogLine {
|
||||
timestamp: DateTime!
|
||||
message: String!
|
||||
}
|
||||
|
||||
type DockerContainerLogs {
|
||||
containerId: PrefixedID!
|
||||
lines: [DockerContainerLogLine!]!
|
||||
|
||||
"""
|
||||
Cursor that can be passed back through the since argument to continue streaming logs.
|
||||
"""
|
||||
cursor: DateTime
|
||||
}
|
||||
|
||||
type DockerContainerStats {
|
||||
id: PrefixedID!
|
||||
|
||||
"""CPU Usage Percentage"""
|
||||
cpuPercent: Float!
|
||||
|
||||
"""Memory Usage String (e.g. 100MB / 1GB)"""
|
||||
memUsage: String!
|
||||
|
||||
"""Memory Usage Percentage"""
|
||||
memPercent: Float!
|
||||
|
||||
"""Network I/O String (e.g. 100MB / 1GB)"""
|
||||
netIO: String!
|
||||
|
||||
"""Block I/O String (e.g. 100MB / 1GB)"""
|
||||
blockIO: String!
|
||||
}
|
||||
|
||||
"""Tailscale exit node connection status"""
|
||||
type TailscaleExitNodeStatus {
|
||||
"""Whether the exit node is online"""
|
||||
online: Boolean!
|
||||
|
||||
"""Tailscale IPs of the exit node"""
|
||||
tailscaleIps: [String!]
|
||||
}
|
||||
|
||||
"""Tailscale status for a Docker container"""
|
||||
type TailscaleStatus {
|
||||
"""Whether Tailscale is online in the container"""
|
||||
online: Boolean!
|
||||
|
||||
"""Current Tailscale version"""
|
||||
version: String
|
||||
|
||||
"""Latest available Tailscale version"""
|
||||
latestVersion: String
|
||||
|
||||
"""Whether a Tailscale update is available"""
|
||||
updateAvailable: Boolean!
|
||||
|
||||
"""Configured Tailscale hostname"""
|
||||
hostname: String
|
||||
|
||||
"""Actual Tailscale DNS name"""
|
||||
dnsName: String
|
||||
|
||||
"""DERP relay code"""
|
||||
relay: String
|
||||
|
||||
"""DERP relay region name"""
|
||||
relayName: String
|
||||
|
||||
"""Tailscale IPv4 and IPv6 addresses"""
|
||||
tailscaleIps: [String!]
|
||||
|
||||
"""Advertised subnet routes"""
|
||||
primaryRoutes: [String!]
|
||||
|
||||
"""Whether this container is an exit node"""
|
||||
isExitNode: Boolean!
|
||||
|
||||
"""Status of the connected exit node (if using one)"""
|
||||
exitNodeStatus: TailscaleExitNodeStatus
|
||||
|
||||
"""Tailscale Serve/Funnel WebUI URL"""
|
||||
webUiUrl: String
|
||||
|
||||
"""Tailscale key expiry date"""
|
||||
keyExpiry: DateTime
|
||||
|
||||
"""Days until key expires"""
|
||||
keyExpiryDays: Int
|
||||
|
||||
"""Whether the Tailscale key has expired"""
|
||||
keyExpired: Boolean!
|
||||
|
||||
"""Tailscale backend state (Running, NeedsLogin, Stopped, etc.)"""
|
||||
backendState: String
|
||||
|
||||
"""Authentication URL if Tailscale needs login"""
|
||||
authUrl: String
|
||||
}
|
||||
|
||||
type Docker implements Node {
|
||||
id: PrefixedID!
|
||||
containers(skipCache: Boolean! = false): [DockerContainer!]!
|
||||
networks(skipCache: Boolean! = false): [DockerNetwork!]!
|
||||
organizer: ResolvedOrganizerV1!
|
||||
portConflicts(skipCache: Boolean! = false): DockerPortConflicts!
|
||||
|
||||
"""
|
||||
Access container logs. Requires specifying a target container id through resolver arguments.
|
||||
"""
|
||||
logs(id: PrefixedID!, since: DateTime, tail: Int): DockerContainerLogs!
|
||||
container(id: PrefixedID!): DockerContainer
|
||||
organizer(skipCache: Boolean! = false): ResolvedOrganizerV1!
|
||||
containerUpdateStatuses: [ExplicitStatusItem!]!
|
||||
}
|
||||
|
||||
type DockerTemplateSyncResult {
|
||||
scanned: Int!
|
||||
matched: Int!
|
||||
skipped: Int!
|
||||
errors: [String!]!
|
||||
}
|
||||
|
||||
type ResolvedOrganizerView {
|
||||
id: String!
|
||||
name: String!
|
||||
root: ResolvedOrganizerEntry!
|
||||
rootId: String!
|
||||
flatEntries: [FlatOrganizerEntry!]!
|
||||
prefs: JSON
|
||||
}
|
||||
|
||||
union ResolvedOrganizerEntry = ResolvedOrganizerFolder | OrganizerContainerResource | OrganizerResource
|
||||
|
||||
type ResolvedOrganizerFolder {
|
||||
id: String!
|
||||
type: String!
|
||||
name: String!
|
||||
children: [ResolvedOrganizerEntry!]!
|
||||
}
|
||||
|
||||
type OrganizerContainerResource {
|
||||
id: String!
|
||||
type: String!
|
||||
name: String!
|
||||
meta: DockerContainer
|
||||
}
|
||||
|
||||
type OrganizerResource {
|
||||
id: String!
|
||||
type: String!
|
||||
name: String!
|
||||
meta: JSON
|
||||
}
|
||||
|
||||
type ResolvedOrganizerV1 {
|
||||
version: Float!
|
||||
views: [ResolvedOrganizerView!]!
|
||||
}
|
||||
|
||||
type FlatOrganizerEntry {
|
||||
id: String!
|
||||
type: String!
|
||||
name: String!
|
||||
parentId: String
|
||||
depth: Float!
|
||||
position: Float!
|
||||
path: [String!]!
|
||||
hasChildren: Boolean!
|
||||
childrenIds: [String!]!
|
||||
meta: DockerContainer
|
||||
}
|
||||
|
||||
type NotificationCounts {
|
||||
info: Int!
|
||||
warning: Int!
|
||||
alert: Int!
|
||||
total: Int!
|
||||
}
|
||||
|
||||
type NotificationOverview {
|
||||
unread: NotificationCounts!
|
||||
archive: NotificationCounts!
|
||||
}
|
||||
|
||||
type Notification implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
"""Also known as 'event'"""
|
||||
title: String!
|
||||
subject: String!
|
||||
description: String!
|
||||
importance: NotificationImportance!
|
||||
link: String
|
||||
type: NotificationType!
|
||||
|
||||
"""ISO Timestamp for when the notification occurred"""
|
||||
timestamp: String
|
||||
formattedTimestamp: String
|
||||
}
|
||||
|
||||
enum NotificationImportance {
|
||||
ALERT
|
||||
INFO
|
||||
WARNING
|
||||
}
|
||||
|
||||
enum NotificationType {
|
||||
UNREAD
|
||||
ARCHIVE
|
||||
}
|
||||
|
||||
type Notifications implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
"""A cached overview of the notifications in the system & their severity."""
|
||||
overview: NotificationOverview!
|
||||
list(filter: NotificationFilter!): [Notification!]!
|
||||
|
||||
"""
|
||||
Deduplicated list of unread warning and alert notifications, sorted latest first.
|
||||
"""
|
||||
warningsAndAlerts: [Notification!]!
|
||||
}
|
||||
|
||||
input NotificationFilter {
|
||||
importance: NotificationImportance
|
||||
type: NotificationType!
|
||||
offset: Int!
|
||||
limit: Int!
|
||||
}
|
||||
|
||||
type FlashBackupStatus {
|
||||
"""Status message indicating the outcome of the backup initiation."""
|
||||
status: String!
|
||||
@@ -1391,6 +1666,19 @@ type CpuLoad {
|
||||
percentSteal: Float!
|
||||
}
|
||||
|
||||
type CpuPackages implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
"""Total CPU package power draw (W)"""
|
||||
totalPower: Float!
|
||||
|
||||
"""Power draw per package (W)"""
|
||||
power: [Float!]!
|
||||
|
||||
"""Temperature per package (°C)"""
|
||||
temp: [Float!]!
|
||||
}
|
||||
|
||||
type CpuUtilization implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
@@ -1454,6 +1742,12 @@ type InfoCpu implements Node {
|
||||
|
||||
"""CPU feature flags"""
|
||||
flags: [String!]
|
||||
|
||||
"""
|
||||
Per-package array of core/thread pairs, e.g. [[[0,1],[2,3]], [[4,5],[6,7]]]
|
||||
"""
|
||||
topology: [[[Int!]!]!]!
|
||||
packages: CpuPackages!
|
||||
}
|
||||
|
||||
type MemoryLayout implements Node {
|
||||
@@ -1753,60 +2047,6 @@ type Metrics implements Node {
|
||||
memory: MemoryUtilization
|
||||
}
|
||||
|
||||
type NotificationCounts {
|
||||
info: Int!
|
||||
warning: Int!
|
||||
alert: Int!
|
||||
total: Int!
|
||||
}
|
||||
|
||||
type NotificationOverview {
|
||||
unread: NotificationCounts!
|
||||
archive: NotificationCounts!
|
||||
}
|
||||
|
||||
type Notification implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
"""Also known as 'event'"""
|
||||
title: String!
|
||||
subject: String!
|
||||
description: String!
|
||||
importance: NotificationImportance!
|
||||
link: String
|
||||
type: NotificationType!
|
||||
|
||||
"""ISO Timestamp for when the notification occurred"""
|
||||
timestamp: String
|
||||
formattedTimestamp: String
|
||||
}
|
||||
|
||||
enum NotificationImportance {
|
||||
ALERT
|
||||
INFO
|
||||
WARNING
|
||||
}
|
||||
|
||||
enum NotificationType {
|
||||
UNREAD
|
||||
ARCHIVE
|
||||
}
|
||||
|
||||
type Notifications implements Node {
|
||||
id: PrefixedID!
|
||||
|
||||
"""A cached overview of the notifications in the system & their severity."""
|
||||
overview: NotificationOverview!
|
||||
list(filter: NotificationFilter!): [Notification!]!
|
||||
}
|
||||
|
||||
input NotificationFilter {
|
||||
importance: NotificationImportance
|
||||
type: NotificationType!
|
||||
offset: Int!
|
||||
limit: Int!
|
||||
}
|
||||
|
||||
type Owner {
|
||||
username: String!
|
||||
url: String!
|
||||
@@ -2416,6 +2656,11 @@ type Mutation {
|
||||
"""Marks a notification as archived."""
|
||||
archiveNotification(id: PrefixedID!): Notification!
|
||||
archiveNotifications(ids: [PrefixedID!]!): NotificationOverview!
|
||||
|
||||
"""
|
||||
Creates a notification if an equivalent unread notification does not already exist.
|
||||
"""
|
||||
notifyIfUnique(input: NotificationData!): Notification
|
||||
archiveAll(importance: NotificationImportance): NotificationOverview!
|
||||
|
||||
"""Marks a notification as unread."""
|
||||
@@ -2430,11 +2675,22 @@ type Mutation {
|
||||
vm: VmMutations!
|
||||
parityCheck: ParityCheckMutations!
|
||||
apiKey: ApiKeyMutations!
|
||||
customization: CustomizationMutations!
|
||||
rclone: RCloneMutations!
|
||||
createDockerFolder(name: String!, parentId: String, childrenIds: [String!]): ResolvedOrganizerV1!
|
||||
setDockerFolderChildren(folderId: String, childrenIds: [String!]!): ResolvedOrganizerV1!
|
||||
deleteDockerEntries(entryIds: [String!]!): ResolvedOrganizerV1!
|
||||
moveDockerEntriesToFolder(sourceEntryIds: [String!]!, destinationFolderId: String!): ResolvedOrganizerV1!
|
||||
moveDockerItemsToPosition(sourceEntryIds: [String!]!, destinationFolderId: String!, position: Float!): ResolvedOrganizerV1!
|
||||
renameDockerFolder(folderId: String!, newName: String!): ResolvedOrganizerV1!
|
||||
createDockerFolderWithItems(name: String!, parentId: String, sourceEntryIds: [String!], position: Float): ResolvedOrganizerV1!
|
||||
updateDockerViewPreferences(viewId: String = "default", prefs: JSON!): ResolvedOrganizerV1!
|
||||
syncDockerTemplatePaths: DockerTemplateSyncResult!
|
||||
|
||||
"""
|
||||
Reset Docker template mappings to defaults. Use this to recover from corrupted state.
|
||||
"""
|
||||
resetDockerTemplateMappings: Boolean!
|
||||
refreshDockerDigests: Boolean!
|
||||
|
||||
"""Initiates a flash drive backup using a configured remote."""
|
||||
@@ -2636,12 +2892,15 @@ input AccessUrlInput {
|
||||
type Subscription {
|
||||
notificationAdded: Notification!
|
||||
notificationsOverview: NotificationOverview!
|
||||
notificationsWarningsAndAlerts: [Notification!]!
|
||||
ownerSubscription: Owner!
|
||||
serversSubscription: Server!
|
||||
parityHistorySubscription: ParityCheck!
|
||||
arraySubscription: UnraidArray!
|
||||
dockerContainerStats: DockerContainerStats!
|
||||
logFile(path: String!): LogFileContent!
|
||||
systemMetricsCpu: CpuUtilization!
|
||||
systemMetricsCpuTelemetry: CpuPackages!
|
||||
systemMetricsMemory: MemoryUtilization!
|
||||
upsUpdates: UPSDevice!
|
||||
}
|
||||
@@ -12,8 +12,13 @@ default:
|
||||
@deploy remote:
|
||||
./scripts/deploy-dev.sh {{remote}}
|
||||
|
||||
# watches typescript files and restarts dev server on changes
|
||||
@watch:
|
||||
watchexec -e ts -r -- pnpm dev
|
||||
|
||||
alias b := build
|
||||
alias d := deploy
|
||||
alias w := watch
|
||||
|
||||
sync-env server:
|
||||
rsync -avz --progress --stats -e ssh .env* root@{{server}}:/usr/local/unraid-api
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@unraid/api",
|
||||
"version": "4.25.2",
|
||||
"version": "4.29.2",
|
||||
"main": "src/cli/index.ts",
|
||||
"type": "module",
|
||||
"corepack": {
|
||||
@@ -53,8 +53,8 @@
|
||||
"unraid-api": "dist/cli.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@apollo/client": "3.14.0",
|
||||
"@apollo/server": "4.12.2",
|
||||
"@apollo/client": "4.0.11",
|
||||
"@apollo/server": "5.2.0",
|
||||
"@as-integrations/fastify": "2.1.1",
|
||||
"@fastify/cookie": "11.0.2",
|
||||
"@fastify/helmet": "13.0.1",
|
||||
@@ -104,6 +104,7 @@
|
||||
"escape-html": "1.0.3",
|
||||
"execa": "9.6.0",
|
||||
"exit-hook": "4.0.0",
|
||||
"fast-xml-parser": "^5.3.0",
|
||||
"fastify": "5.5.0",
|
||||
"filenamify": "7.0.0",
|
||||
"fs-extra": "11.3.1",
|
||||
@@ -116,6 +117,7 @@
|
||||
"graphql-subscriptions": "3.0.0",
|
||||
"graphql-tag": "2.12.6",
|
||||
"graphql-ws": "6.0.6",
|
||||
"html-entities": "^2.6.0",
|
||||
"ini": "5.0.0",
|
||||
"ip": "2.0.1",
|
||||
"jose": "6.0.13",
|
||||
|
||||
@@ -7,7 +7,7 @@ import { exit } from 'process';
|
||||
import type { PackageJson } from 'type-fest';
|
||||
import { $, cd } from 'zx';
|
||||
|
||||
import { getDeploymentVersion } from './get-deployment-version.js';
|
||||
import { getDeploymentVersion } from '@app/../scripts/get-deployment-version.js';
|
||||
|
||||
type ApiPackageJson = PackageJson & {
|
||||
version: string;
|
||||
@@ -83,6 +83,10 @@ try {
|
||||
if (parsedPackageJson.dependencies?.[dep]) {
|
||||
delete parsedPackageJson.dependencies[dep];
|
||||
}
|
||||
// Also strip from peerDependencies (npm doesn't understand workspace: protocol)
|
||||
if (parsedPackageJson.peerDependencies?.[dep]) {
|
||||
delete parsedPackageJson.peerDependencies[dep];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -4,23 +4,18 @@ import {
|
||||
getBannerPathIfPresent,
|
||||
getCasePathIfPresent,
|
||||
} from '@app/core/utils/images/image-file-helpers.js';
|
||||
import { loadDynamixConfigFile } from '@app/store/actions/load-dynamix-config-file.js';
|
||||
import { store } from '@app/store/index.js';
|
||||
import { loadDynamixConfig } from '@app/store/index.js';
|
||||
|
||||
test('get case path returns expected result', async () => {
|
||||
await expect(getCasePathIfPresent()).resolves.toContain('/dev/dynamix/case-model.png');
|
||||
});
|
||||
|
||||
test('get banner path returns null (state unloaded)', async () => {
|
||||
await expect(getBannerPathIfPresent()).resolves.toMatchInlineSnapshot('null');
|
||||
});
|
||||
|
||||
test('get banner path returns the banner (state loaded)', async () => {
|
||||
await store.dispatch(loadDynamixConfigFile()).unwrap();
|
||||
loadDynamixConfig();
|
||||
await expect(getBannerPathIfPresent()).resolves.toContain('/dev/dynamix/banner.png');
|
||||
});
|
||||
|
||||
test('get banner path returns null when no banner (state loaded)', async () => {
|
||||
await store.dispatch(loadDynamixConfigFile()).unwrap();
|
||||
loadDynamixConfig();
|
||||
await expect(getBannerPathIfPresent('notabanner.png')).resolves.toMatchInlineSnapshot('null');
|
||||
});
|
||||
|
||||
@@ -6,6 +6,7 @@ exports[`Returns paths 1`] = `
|
||||
"unraid-api-base",
|
||||
"unraid-data",
|
||||
"docker-autostart",
|
||||
"docker-userprefs",
|
||||
"docker-socket",
|
||||
"rclone-socket",
|
||||
"parity-checks",
|
||||
|
||||
@@ -11,6 +11,7 @@ test('Returns paths', async () => {
|
||||
'unraid-api-base': '/usr/local/unraid-api/',
|
||||
'unraid-data': expect.stringContaining('api/dev/data'),
|
||||
'docker-autostart': '/var/lib/docker/unraid-autostart',
|
||||
'docker-userprefs': '/boot/config/plugins/dockerMan/userprefs.cfg',
|
||||
'docker-socket': '/var/run/docker.sock',
|
||||
'parity-checks': expect.stringContaining('api/dev/states/parity-checks.log'),
|
||||
htpasswd: '/etc/nginx/htpasswd',
|
||||
|
||||
151
api/src/__test__/store/watch/registration-watch.test.ts
Normal file
151
api/src/__test__/store/watch/registration-watch.test.ts
Normal file
@@ -0,0 +1,151 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { StateFileKey } from '@app/store/types.js';
|
||||
import { RegistrationType } from '@app/unraid-api/graph/resolvers/registration/registration.model.js';
|
||||
|
||||
// Mock the store module
|
||||
vi.mock('@app/store/index.js', () => ({
|
||||
store: {
|
||||
dispatch: vi.fn(),
|
||||
},
|
||||
getters: {
|
||||
emhttp: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock the emhttp module
|
||||
vi.mock('@app/store/modules/emhttp.js', () => ({
|
||||
loadSingleStateFile: vi.fn((key) => ({ type: 'emhttp/load-single-state-file', payload: key })),
|
||||
}));
|
||||
|
||||
// Mock the registration module
|
||||
vi.mock('@app/store/modules/registration.js', () => ({
|
||||
loadRegistrationKey: vi.fn(() => ({ type: 'registration/load-registration-key' })),
|
||||
}));
|
||||
|
||||
// Mock the logger
|
||||
vi.mock('@app/core/log.js', () => ({
|
||||
keyServerLogger: {
|
||||
info: vi.fn(),
|
||||
debug: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
describe('reloadVarIniWithRetry', () => {
|
||||
let store: { dispatch: ReturnType<typeof vi.fn> };
|
||||
let getters: { emhttp: ReturnType<typeof vi.fn> };
|
||||
let loadSingleStateFile: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.useFakeTimers();
|
||||
|
||||
const storeModule = await import('@app/store/index.js');
|
||||
const emhttpModule = await import('@app/store/modules/emhttp.js');
|
||||
|
||||
store = storeModule.store as unknown as typeof store;
|
||||
getters = storeModule.getters as unknown as typeof getters;
|
||||
loadSingleStateFile = emhttpModule.loadSingleStateFile as unknown as typeof loadSingleStateFile;
|
||||
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it('returns early when registration state changes on first retry', async () => {
|
||||
// Initial state is TRIAL
|
||||
getters.emhttp
|
||||
.mockReturnValueOnce({ var: { regTy: RegistrationType.TRIAL } }) // First call (beforeState)
|
||||
.mockReturnValueOnce({ var: { regTy: RegistrationType.UNLEASHED } }); // After first reload
|
||||
|
||||
const { reloadVarIniWithRetry } = await import('@app/store/watch/registration-watch.js');
|
||||
|
||||
const promise = reloadVarIniWithRetry();
|
||||
|
||||
// Advance past the first delay (500ms)
|
||||
await vi.advanceTimersByTimeAsync(500);
|
||||
await promise;
|
||||
|
||||
// Should only dispatch once since state changed
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(1);
|
||||
expect(loadSingleStateFile).toHaveBeenCalledWith(StateFileKey.var);
|
||||
});
|
||||
|
||||
it('retries up to maxRetries when state does not change', async () => {
|
||||
// State never changes
|
||||
getters.emhttp.mockReturnValue({ var: { regTy: RegistrationType.TRIAL } });
|
||||
|
||||
const { reloadVarIniWithRetry } = await import('@app/store/watch/registration-watch.js');
|
||||
|
||||
const promise = reloadVarIniWithRetry(3);
|
||||
|
||||
// Advance through all retries: 500ms, 1000ms, 2000ms
|
||||
await vi.advanceTimersByTimeAsync(500);
|
||||
await vi.advanceTimersByTimeAsync(1000);
|
||||
await vi.advanceTimersByTimeAsync(2000);
|
||||
await promise;
|
||||
|
||||
// Should dispatch 3 times (maxRetries)
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(3);
|
||||
});
|
||||
|
||||
it('stops retrying when state changes on second attempt', async () => {
|
||||
getters.emhttp
|
||||
.mockReturnValueOnce({ var: { regTy: RegistrationType.TRIAL } }) // beforeState
|
||||
.mockReturnValueOnce({ var: { regTy: RegistrationType.TRIAL } }) // After first reload (no change)
|
||||
.mockReturnValueOnce({ var: { regTy: RegistrationType.UNLEASHED } }); // After second reload (changed!)
|
||||
|
||||
const { reloadVarIniWithRetry } = await import('@app/store/watch/registration-watch.js');
|
||||
|
||||
const promise = reloadVarIniWithRetry(3);
|
||||
|
||||
// First retry
|
||||
await vi.advanceTimersByTimeAsync(500);
|
||||
// Second retry
|
||||
await vi.advanceTimersByTimeAsync(1000);
|
||||
await promise;
|
||||
|
||||
// Should dispatch twice - stopped after state changed
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('handles undefined regTy gracefully', async () => {
|
||||
getters.emhttp.mockReturnValue({ var: {} });
|
||||
|
||||
const { reloadVarIniWithRetry } = await import('@app/store/watch/registration-watch.js');
|
||||
|
||||
const promise = reloadVarIniWithRetry(1);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(500);
|
||||
await promise;
|
||||
|
||||
// Should still dispatch even with undefined regTy
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('uses exponential backoff delays', async () => {
|
||||
getters.emhttp.mockReturnValue({ var: { regTy: RegistrationType.TRIAL } });
|
||||
|
||||
const { reloadVarIniWithRetry } = await import('@app/store/watch/registration-watch.js');
|
||||
|
||||
const promise = reloadVarIniWithRetry(3);
|
||||
|
||||
// At 0ms, no dispatch yet
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(0);
|
||||
|
||||
// At 500ms, first dispatch
|
||||
await vi.advanceTimersByTimeAsync(500);
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(1);
|
||||
|
||||
// At 1500ms (500 + 1000), second dispatch
|
||||
await vi.advanceTimersByTimeAsync(1000);
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(2);
|
||||
|
||||
// At 3500ms (500 + 1000 + 2000), third dispatch
|
||||
await vi.advanceTimersByTimeAsync(2000);
|
||||
expect(store.dispatch).toHaveBeenCalledTimes(3);
|
||||
|
||||
await promise;
|
||||
});
|
||||
});
|
||||
234
api/src/common/compare-semver-version.spec.ts
Normal file
234
api/src/common/compare-semver-version.spec.ts
Normal file
@@ -0,0 +1,234 @@
|
||||
import { eq, gt, gte, lt, lte, parse } from 'semver';
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import { compareVersions } from '@app/common/compare-semver-version.js';
|
||||
|
||||
describe('compareVersions', () => {
|
||||
describe('basic comparisons', () => {
|
||||
it('should return true when current version is greater than compared (gte)', () => {
|
||||
const current = parse('7.3.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true when current version equals compared (gte)', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when current version is less than compared (gte)', () => {
|
||||
const current = parse('7.1.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when current version is less than compared (lte)', () => {
|
||||
const current = parse('7.1.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true when current version equals compared (lte)', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when current version is greater than compared (lte)', () => {
|
||||
const current = parse('7.3.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when current version is greater than compared (gt)', () => {
|
||||
const current = parse('7.3.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gt)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when current version equals compared (gt)', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gt)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when current version is less than compared (lt)', () => {
|
||||
const current = parse('7.1.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lt)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when current version equals compared (lt)', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lt)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when versions are equal (eq)', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, eq)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when versions are not equal (eq)', () => {
|
||||
const current = parse('7.3.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, eq)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('prerelease handling - current has prerelease, compared is stable', () => {
|
||||
it('should return true for gte when current prerelease > stable (same base)', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true for gt when current prerelease > stable (same base)', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gt)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for lte when current prerelease < stable (same base)', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false for lt when current prerelease < stable (same base)', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, lt)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false for eq when current prerelease != stable (same base)', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, eq)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('prerelease handling - current is stable, compared has prerelease', () => {
|
||||
it('should use normal comparison when current is stable and compared has prerelease', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0-beta.1')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use normal comparison for lte when current is stable and compared has prerelease', () => {
|
||||
const current = parse('7.2.0')!;
|
||||
const compared = parse('7.2.0-beta.1')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('prerelease handling - both have prerelease', () => {
|
||||
it('should use normal comparison when both versions have prerelease', () => {
|
||||
const current = parse('7.2.0-beta.2')!;
|
||||
const compared = parse('7.2.0-beta.1')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use normal comparison for lte when both have prerelease', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0-beta.2')!;
|
||||
expect(compareVersions(current, compared, lte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use normal comparison when prerelease versions are equal', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0-beta.1')!;
|
||||
expect(compareVersions(current, compared, eq)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('prerelease handling - different base versions', () => {
|
||||
it('should use normal comparison when base versions differ (current prerelease)', () => {
|
||||
const current = parse('7.3.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use normal comparison when base versions differ (current prerelease, less)', () => {
|
||||
const current = parse('7.1.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('includePrerelease flag', () => {
|
||||
it('should apply special prerelease handling when includePrerelease is true', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte, { includePrerelease: true })).toBe(true);
|
||||
});
|
||||
|
||||
it('should skip special prerelease handling when includePrerelease is false', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte, { includePrerelease: false })).toBe(false);
|
||||
});
|
||||
|
||||
it('should default to includePrerelease true', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle patch version differences', () => {
|
||||
const current = parse('7.2.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle minor version differences', () => {
|
||||
const current = parse('7.3.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle major version differences', () => {
|
||||
const current = parse('8.0.0')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle complex prerelease tags', () => {
|
||||
const current = parse('7.2.0-beta.2.4')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle alpha prerelease tags', () => {
|
||||
const current = parse('7.2.0-alpha.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle rc prerelease tags', () => {
|
||||
const current = parse('7.2.0-rc.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
expect(compareVersions(current, compared, gte)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('comparison function edge cases', () => {
|
||||
it('should handle custom comparison functions that are not gte/lte/gt/lt', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
const customCompare = (a: typeof current, b: typeof compared) => a.compare(b) === 1;
|
||||
expect(compareVersions(current, compared, customCompare)).toBe(false);
|
||||
});
|
||||
|
||||
it('should fall through to normal comparison for unknown functions with prerelease', () => {
|
||||
const current = parse('7.2.0-beta.1')!;
|
||||
const compared = parse('7.2.0')!;
|
||||
const customCompare = () => false;
|
||||
expect(compareVersions(current, compared, customCompare)).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
44
api/src/common/compare-semver-version.ts
Normal file
44
api/src/common/compare-semver-version.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
import type { SemVer } from 'semver';
|
||||
import { gt, gte, lt, lte } from 'semver';
|
||||
|
||||
/**
|
||||
* Shared version comparison logic with special handling for prerelease versions.
|
||||
*
|
||||
* When base versions are equal and current version has a prerelease tag while compared doesn't:
|
||||
* - For gte/gt: prerelease is considered greater than stable (returns true)
|
||||
* - For lte/lt: prerelease is considered less than stable (returns false)
|
||||
* - For eq: prerelease is not equal to stable (returns false)
|
||||
*
|
||||
* @param currentVersion - The current Unraid version (SemVer object)
|
||||
* @param comparedVersion - The version to compare against (SemVer object)
|
||||
* @param compareFn - The comparison function (e.g., gte, lte, lt, gt, eq)
|
||||
* @param includePrerelease - Whether to include special prerelease handling
|
||||
* @returns The result of the comparison
|
||||
*/
|
||||
export const compareVersions = (
|
||||
currentVersion: SemVer,
|
||||
comparedVersion: SemVer,
|
||||
compareFn: (a: SemVer, b: SemVer) => boolean,
|
||||
{ includePrerelease = true }: { includePrerelease?: boolean } = {}
|
||||
): boolean => {
|
||||
if (includePrerelease) {
|
||||
const baseCurrent = `${currentVersion.major}.${currentVersion.minor}.${currentVersion.patch}`;
|
||||
const baseCompared = `${comparedVersion.major}.${comparedVersion.minor}.${comparedVersion.patch}`;
|
||||
|
||||
if (baseCurrent === baseCompared) {
|
||||
const currentHasPrerelease = currentVersion.prerelease.length > 0;
|
||||
const comparedHasPrerelease = comparedVersion.prerelease.length > 0;
|
||||
|
||||
if (currentHasPrerelease && !comparedHasPrerelease) {
|
||||
if (compareFn === gte || compareFn === gt) {
|
||||
return true;
|
||||
}
|
||||
if (compareFn === lte || compareFn === lt) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return compareFn(currentVersion, comparedVersion);
|
||||
};
|
||||
60
api/src/common/get-unraid-version-sync.ts
Normal file
60
api/src/common/get-unraid-version-sync.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import type { SemVer } from 'semver';
|
||||
import { coerce } from 'semver';
|
||||
|
||||
import { compareVersions } from '@app/common/compare-semver-version.js';
|
||||
import { fileExistsSync } from '@app/core/utils/files/file-exists.js';
|
||||
import { parseConfig } from '@app/core/utils/misc/parse-config.js';
|
||||
|
||||
type UnraidVersionIni = {
|
||||
version?: string;
|
||||
};
|
||||
|
||||
/**
|
||||
* Synchronously reads the Unraid version from /etc/unraid-version
|
||||
* @returns The Unraid version string, or 'unknown' if the file cannot be read
|
||||
*/
|
||||
export const getUnraidVersionSync = (): string => {
|
||||
const versionPath = '/etc/unraid-version';
|
||||
|
||||
if (!fileExistsSync(versionPath)) {
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
try {
|
||||
const versionIni = parseConfig<UnraidVersionIni>({ filePath: versionPath, type: 'ini' });
|
||||
return versionIni.version || 'unknown';
|
||||
} catch {
|
||||
return 'unknown';
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Compares the Unraid version against a specified version using a comparison function
|
||||
* @param compareFn - The comparison function from semver (e.g., lt, gte, lte, gt, eq)
|
||||
* @param version - The version to compare against (e.g., '7.3.0')
|
||||
* @param options - Options for the comparison
|
||||
* @returns The result of the comparison, or false if the version cannot be determined
|
||||
*/
|
||||
export const compareUnraidVersionSync = (
|
||||
compareFn: (a: SemVer, b: SemVer) => boolean,
|
||||
version: string,
|
||||
{ includePrerelease = true }: { includePrerelease?: boolean } = {}
|
||||
): boolean => {
|
||||
const currentVersion = getUnraidVersionSync();
|
||||
if (currentVersion === 'unknown') {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const current = coerce(currentVersion, { includePrerelease });
|
||||
const compared = coerce(version, { includePrerelease });
|
||||
|
||||
if (!current || !compared) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return compareVersions(current, compared, compareFn, { includePrerelease });
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
12
api/src/connect-plugin-cleanup.ts
Normal file
12
api/src/connect-plugin-cleanup.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import { existsSync } from 'node:fs';
|
||||
|
||||
/**
|
||||
* Local filesystem and env checks stay synchronous so we can branch at module load.
|
||||
* @returns True if the Connect Unraid plugin is installed, false otherwise.
|
||||
*/
|
||||
export const isConnectPluginInstalled = () => {
|
||||
if (process.env.SKIP_CONNECT_PLUGIN_CHECK === 'true') {
|
||||
return true;
|
||||
}
|
||||
return existsSync('/boot/config/plugins/dynamix.unraid.net.plg');
|
||||
};
|
||||
@@ -1,7 +1,7 @@
|
||||
import pino from 'pino';
|
||||
import pretty from 'pino-pretty';
|
||||
|
||||
import { API_VERSION, LOG_LEVEL, LOG_TYPE, SUPPRESS_LOGS } from '@app/environment.js';
|
||||
import { API_VERSION, LOG_LEVEL, LOG_TYPE, PATHS_LOGS_FILE, SUPPRESS_LOGS } from '@app/environment.js';
|
||||
|
||||
export const levels = ['trace', 'debug', 'info', 'warn', 'error', 'fatal'] as const;
|
||||
|
||||
@@ -15,18 +15,24 @@ const nullDestination = pino.destination({
|
||||
},
|
||||
});
|
||||
|
||||
const LOG_TRANSPORT = process.env.LOG_TRANSPORT ?? 'file';
|
||||
const useConsole = LOG_TRANSPORT === 'console';
|
||||
|
||||
export const logDestination =
|
||||
process.env.SUPPRESS_LOGS === 'true' ? nullDestination : pino.destination();
|
||||
// Since PM2 captures stdout and writes to the log file, we should not colorize stdout
|
||||
// to avoid ANSI escape codes in the log file
|
||||
process.env.SUPPRESS_LOGS === 'true'
|
||||
? nullDestination
|
||||
: useConsole
|
||||
? pino.destination(1) // stdout
|
||||
: pino.destination({ dest: PATHS_LOGS_FILE, mkdir: true });
|
||||
|
||||
const stream = SUPPRESS_LOGS
|
||||
? nullDestination
|
||||
: LOG_TYPE === 'pretty'
|
||||
? pretty({
|
||||
singleLine: true,
|
||||
hideObject: false,
|
||||
colorize: false, // No colors since PM2 writes stdout to file
|
||||
colorizeObjects: false,
|
||||
colorize: useConsole, // Enable colors when outputting to console
|
||||
colorizeObjects: useConsole,
|
||||
levelFirst: false,
|
||||
ignore: 'hostname,pid',
|
||||
destination: logDestination,
|
||||
@@ -34,10 +40,10 @@ const stream = SUPPRESS_LOGS
|
||||
customPrettifiers: {
|
||||
time: (timestamp: string | object) => `[${timestamp}`,
|
||||
level: (_logLevel: string | object, _key: string, log: any, extras: any) => {
|
||||
// Use label instead of labelColorized for non-colored output
|
||||
const { label } = extras;
|
||||
const { label, labelColorized } = extras;
|
||||
const context = log.context || log.logger || 'app';
|
||||
return `${label} ${context}]`;
|
||||
// Use colorized label when outputting to console
|
||||
return `${useConsole ? labelColorized : label} ${context}]`;
|
||||
},
|
||||
},
|
||||
messageFormat: (log: any, messageKey: string) => {
|
||||
|
||||
66
api/src/core/utils/__test__/safe-mode.test.ts
Normal file
66
api/src/core/utils/__test__/safe-mode.test.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { afterEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { isSafeModeEnabled } from '@app/core/utils/safe-mode.js';
|
||||
import { store } from '@app/store/index.js';
|
||||
import * as stateFileLoader from '@app/store/services/state-file-loader.js';
|
||||
|
||||
describe('isSafeModeEnabled', () => {
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('returns the safe mode flag already present in the store', () => {
|
||||
const baseState = store.getState();
|
||||
vi.spyOn(store, 'getState').mockReturnValue({
|
||||
...baseState,
|
||||
emhttp: {
|
||||
...baseState.emhttp,
|
||||
var: {
|
||||
...(baseState.emhttp?.var ?? {}),
|
||||
safeMode: true,
|
||||
},
|
||||
},
|
||||
});
|
||||
const loaderSpy = vi.spyOn(stateFileLoader, 'loadStateFileSync');
|
||||
|
||||
expect(isSafeModeEnabled()).toBe(true);
|
||||
expect(loaderSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('falls back to the synchronous loader when store state is missing', () => {
|
||||
const baseState = store.getState();
|
||||
vi.spyOn(store, 'getState').mockReturnValue({
|
||||
...baseState,
|
||||
emhttp: {
|
||||
...baseState.emhttp,
|
||||
var: {
|
||||
...(baseState.emhttp?.var ?? {}),
|
||||
safeMode: undefined as unknown as boolean,
|
||||
} as typeof baseState.emhttp.var,
|
||||
} as typeof baseState.emhttp,
|
||||
} as typeof baseState);
|
||||
vi.spyOn(stateFileLoader, 'loadStateFileSync').mockReturnValue({
|
||||
...(baseState.emhttp?.var ?? {}),
|
||||
safeMode: true,
|
||||
} as any);
|
||||
|
||||
expect(isSafeModeEnabled()).toBe(true);
|
||||
});
|
||||
|
||||
it('defaults to false when loader cannot provide state', () => {
|
||||
const baseState = store.getState();
|
||||
vi.spyOn(store, 'getState').mockReturnValue({
|
||||
...baseState,
|
||||
emhttp: {
|
||||
...baseState.emhttp,
|
||||
var: {
|
||||
...(baseState.emhttp?.var ?? {}),
|
||||
safeMode: undefined as unknown as boolean,
|
||||
} as typeof baseState.emhttp.var,
|
||||
} as typeof baseState.emhttp,
|
||||
} as typeof baseState);
|
||||
vi.spyOn(stateFileLoader, 'loadStateFileSync').mockReturnValue(null);
|
||||
|
||||
expect(isSafeModeEnabled()).toBe(false);
|
||||
});
|
||||
});
|
||||
231
api/src/core/utils/misc/__test__/timeout-budget.test.ts
Normal file
231
api/src/core/utils/misc/__test__/timeout-budget.test.ts
Normal file
@@ -0,0 +1,231 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { TimeoutBudget } from '@app/core/utils/misc/timeout-budget.js';
|
||||
|
||||
describe('TimeoutBudget', () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe('constructor', () => {
|
||||
it('initializes with the given budget', () => {
|
||||
const budget = new TimeoutBudget(10000);
|
||||
expect(budget.remaining()).toBe(10000);
|
||||
expect(budget.elapsed()).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('remaining', () => {
|
||||
it('returns full budget immediately after construction', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
expect(budget.remaining()).toBe(5000);
|
||||
});
|
||||
|
||||
it('decreases as time passes', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
|
||||
vi.advanceTimersByTime(1000);
|
||||
expect(budget.remaining()).toBe(4000);
|
||||
|
||||
vi.advanceTimersByTime(2000);
|
||||
expect(budget.remaining()).toBe(2000);
|
||||
});
|
||||
|
||||
it('never returns negative values', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
|
||||
vi.advanceTimersByTime(5000); // Well past the budget
|
||||
expect(budget.remaining()).toBe(0);
|
||||
});
|
||||
|
||||
it('returns zero when budget is exactly exhausted', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
|
||||
vi.advanceTimersByTime(1000);
|
||||
expect(budget.remaining()).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('elapsed', () => {
|
||||
it('returns zero immediately after construction', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
expect(budget.elapsed()).toBe(0);
|
||||
});
|
||||
|
||||
it('increases as time passes', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
|
||||
vi.advanceTimersByTime(1000);
|
||||
expect(budget.elapsed()).toBe(1000);
|
||||
|
||||
vi.advanceTimersByTime(500);
|
||||
expect(budget.elapsed()).toBe(1500);
|
||||
});
|
||||
|
||||
it('continues increasing past the budget limit', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
|
||||
vi.advanceTimersByTime(2000);
|
||||
expect(budget.elapsed()).toBe(2000);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getTimeout', () => {
|
||||
it('returns maxMs when plenty of budget remains', () => {
|
||||
const budget = new TimeoutBudget(10000);
|
||||
expect(budget.getTimeout(2000)).toBe(2000);
|
||||
});
|
||||
|
||||
it('returns maxMs when budget minus reserve is sufficient', () => {
|
||||
const budget = new TimeoutBudget(10000);
|
||||
expect(budget.getTimeout(2000, 5000)).toBe(2000);
|
||||
});
|
||||
|
||||
it('caps timeout to available budget minus reserve', () => {
|
||||
const budget = new TimeoutBudget(10000);
|
||||
vi.advanceTimersByTime(5000); // 5000ms remaining
|
||||
|
||||
// Want 2000ms but reserve 4000ms, only 1000ms available
|
||||
expect(budget.getTimeout(2000, 4000)).toBe(1000);
|
||||
});
|
||||
|
||||
it('caps timeout to remaining budget when no reserve', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
vi.advanceTimersByTime(800); // 200ms remaining
|
||||
|
||||
expect(budget.getTimeout(500)).toBe(200);
|
||||
});
|
||||
|
||||
it('returns minimum of 100ms even when budget is exhausted', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
vi.advanceTimersByTime(2000); // Budget exhausted
|
||||
|
||||
expect(budget.getTimeout(500)).toBe(100);
|
||||
});
|
||||
|
||||
it('returns minimum of 100ms when reserve exceeds remaining', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
vi.advanceTimersByTime(4000); // 1000ms remaining
|
||||
|
||||
// Reserve 2000ms but only 1000ms remaining
|
||||
expect(budget.getTimeout(500, 2000)).toBe(100);
|
||||
});
|
||||
|
||||
it('uses default reserve of 0 when not specified', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
vi.advanceTimersByTime(500); // 500ms remaining
|
||||
|
||||
expect(budget.getTimeout(1000)).toBe(500); // Capped to remaining
|
||||
});
|
||||
});
|
||||
|
||||
describe('hasTimeFor', () => {
|
||||
it('returns true when enough time remains', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
expect(budget.hasTimeFor(3000)).toBe(true);
|
||||
});
|
||||
|
||||
it('returns true when exactly enough time remains', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
expect(budget.hasTimeFor(5000)).toBe(true);
|
||||
});
|
||||
|
||||
it('returns false when not enough time remains', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
expect(budget.hasTimeFor(6000)).toBe(false);
|
||||
});
|
||||
|
||||
it('accounts for elapsed time', () => {
|
||||
const budget = new TimeoutBudget(5000);
|
||||
vi.advanceTimersByTime(3000); // 2000ms remaining
|
||||
|
||||
expect(budget.hasTimeFor(2000)).toBe(true);
|
||||
expect(budget.hasTimeFor(3000)).toBe(false);
|
||||
});
|
||||
|
||||
it('returns false when budget is exhausted', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
vi.advanceTimersByTime(2000);
|
||||
|
||||
expect(budget.hasTimeFor(1)).toBe(false);
|
||||
});
|
||||
|
||||
it('returns true for zero required time', () => {
|
||||
const budget = new TimeoutBudget(1000);
|
||||
vi.advanceTimersByTime(2000); // Budget exhausted
|
||||
|
||||
expect(budget.hasTimeFor(0)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('integration scenarios', () => {
|
||||
it('simulates a typical startup sequence', () => {
|
||||
const budget = new TimeoutBudget(13000); // 13 second budget
|
||||
const BOOTSTRAP_RESERVE = 8000;
|
||||
const MAX_OP_TIMEOUT = 2000;
|
||||
|
||||
// First operation - should get full 2000ms
|
||||
const op1Timeout = budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
expect(op1Timeout).toBe(2000);
|
||||
|
||||
// Simulate operation taking 500ms
|
||||
vi.advanceTimersByTime(500);
|
||||
|
||||
// Second operation - still have plenty of budget
|
||||
const op2Timeout = budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
expect(op2Timeout).toBe(2000);
|
||||
|
||||
// Simulate operation taking 1000ms
|
||||
vi.advanceTimersByTime(1000);
|
||||
|
||||
// Third operation
|
||||
const op3Timeout = budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
expect(op3Timeout).toBe(2000);
|
||||
|
||||
// Simulate slow operation taking 2000ms
|
||||
vi.advanceTimersByTime(2000);
|
||||
|
||||
// Now 3500ms elapsed, 9500ms remaining
|
||||
// After reserve, only 1500ms available - less than max
|
||||
const op4Timeout = budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
expect(op4Timeout).toBe(1500);
|
||||
|
||||
// Simulate operation completing
|
||||
vi.advanceTimersByTime(1000);
|
||||
|
||||
// Bootstrap phase - use all remaining time
|
||||
const bootstrapTimeout = budget.remaining();
|
||||
expect(bootstrapTimeout).toBe(8500);
|
||||
expect(budget.hasTimeFor(8000)).toBe(true);
|
||||
});
|
||||
|
||||
it('handles worst-case scenario where all operations timeout', () => {
|
||||
const budget = new TimeoutBudget(13000);
|
||||
const BOOTSTRAP_RESERVE = 8000;
|
||||
const MAX_OP_TIMEOUT = 2000;
|
||||
|
||||
// Each operation times out at its limit
|
||||
// Available for operations: 13000 - 8000 = 5000ms
|
||||
|
||||
// Op 1: gets 2000ms, times out
|
||||
budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
vi.advanceTimersByTime(2000);
|
||||
|
||||
// Op 2: gets 2000ms, times out
|
||||
budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
vi.advanceTimersByTime(2000);
|
||||
|
||||
// Op 3: only 1000ms available (5000 - 4000), times out
|
||||
const op3Timeout = budget.getTimeout(MAX_OP_TIMEOUT, BOOTSTRAP_RESERVE);
|
||||
expect(op3Timeout).toBe(1000);
|
||||
vi.advanceTimersByTime(1000);
|
||||
|
||||
// Bootstrap: should still have 8000ms
|
||||
expect(budget.remaining()).toBe(8000);
|
||||
});
|
||||
});
|
||||
});
|
||||
65
api/src/core/utils/misc/__test__/with-timeout.test.ts
Normal file
65
api/src/core/utils/misc/__test__/with-timeout.test.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import { withTimeout } from '@app/core/utils/misc/with-timeout.js';
|
||||
|
||||
describe('withTimeout', () => {
|
||||
it('resolves when promise completes before timeout', async () => {
|
||||
const promise = Promise.resolve('success');
|
||||
const result = await withTimeout(promise, 1000, 'testOp');
|
||||
expect(result).toBe('success');
|
||||
});
|
||||
|
||||
it('resolves with correct value for delayed promise within timeout', async () => {
|
||||
const promise = new Promise<number>((resolve) => setTimeout(() => resolve(42), 50));
|
||||
const result = await withTimeout(promise, 1000, 'testOp');
|
||||
expect(result).toBe(42);
|
||||
});
|
||||
|
||||
it('rejects when promise takes longer than timeout', async () => {
|
||||
const promise = new Promise<string>((resolve) => setTimeout(() => resolve('late'), 500));
|
||||
await expect(withTimeout(promise, 50, 'slowOp')).rejects.toThrow('slowOp timed out after 50ms');
|
||||
});
|
||||
|
||||
it('includes operation name in timeout error message', async () => {
|
||||
const promise = new Promise<void>(() => {}); // Never resolves
|
||||
await expect(withTimeout(promise, 10, 'myCustomOperation')).rejects.toThrow(
|
||||
'myCustomOperation timed out after 10ms'
|
||||
);
|
||||
});
|
||||
|
||||
it('propagates rejection from the original promise', async () => {
|
||||
const promise = Promise.reject(new Error('original error'));
|
||||
await expect(withTimeout(promise, 1000, 'testOp')).rejects.toThrow('original error');
|
||||
});
|
||||
|
||||
it('resolves immediately for already-resolved promises', async () => {
|
||||
const promise = Promise.resolve('immediate');
|
||||
const start = Date.now();
|
||||
const result = await withTimeout(promise, 1000, 'testOp');
|
||||
const elapsed = Date.now() - start;
|
||||
|
||||
expect(result).toBe('immediate');
|
||||
expect(elapsed).toBeLessThan(50); // Should be nearly instant
|
||||
});
|
||||
|
||||
it('works with zero timeout (immediately times out for pending promises)', async () => {
|
||||
const promise = new Promise<void>(() => {}); // Never resolves
|
||||
await expect(withTimeout(promise, 0, 'zeroTimeout')).rejects.toThrow(
|
||||
'zeroTimeout timed out after 0ms'
|
||||
);
|
||||
});
|
||||
|
||||
it('preserves the type of the resolved value', async () => {
|
||||
interface TestType {
|
||||
id: number;
|
||||
name: string;
|
||||
}
|
||||
const testObj: TestType = { id: 1, name: 'test' };
|
||||
const promise = Promise.resolve(testObj);
|
||||
|
||||
const result = await withTimeout(promise, 1000, 'testOp');
|
||||
|
||||
expect(result.id).toBe(1);
|
||||
expect(result.name).toBe('test');
|
||||
});
|
||||
});
|
||||
@@ -2,7 +2,7 @@ import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { getters } from '@app/store/index.js';
|
||||
|
||||
interface DockerError extends NodeJS.ErrnoException {
|
||||
address: string;
|
||||
address?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
70
api/src/core/utils/misc/timeout-budget.ts
Normal file
70
api/src/core/utils/misc/timeout-budget.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
/**
|
||||
* Tracks remaining time budget to ensure we don't exceed external timeouts (e.g., PM2's listen_timeout).
|
||||
*
|
||||
* This class helps coordinate multiple async operations by:
|
||||
* - Tracking elapsed time from construction
|
||||
* - Calculating dynamic timeouts based on remaining budget
|
||||
* - Reserving time for critical operations (like server bootstrap)
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const budget = new TimeoutBudget(15000); // 15 second total budget
|
||||
*
|
||||
* // Each operation gets a timeout capped by remaining budget
|
||||
* await withTimeout(loadConfig(), budget.getTimeout(2000, 8000), 'loadConfig');
|
||||
* await withTimeout(loadState(), budget.getTimeout(2000, 8000), 'loadState');
|
||||
*
|
||||
* // Bootstrap gets all remaining time
|
||||
* await withTimeout(bootstrap(), budget.remaining(), 'bootstrap');
|
||||
*
|
||||
* console.log(`Completed in ${budget.elapsed()}ms`);
|
||||
* ```
|
||||
*/
|
||||
export class TimeoutBudget {
|
||||
private startTime: number;
|
||||
private budgetMs: number;
|
||||
|
||||
/**
|
||||
* Creates a new startup budget tracker.
|
||||
* @param budgetMs Total time budget in milliseconds
|
||||
*/
|
||||
constructor(budgetMs: number) {
|
||||
this.startTime = Date.now();
|
||||
this.budgetMs = budgetMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns remaining time in milliseconds.
|
||||
* Never returns negative values.
|
||||
*/
|
||||
remaining(): number {
|
||||
return Math.max(0, this.budgetMs - (Date.now() - this.startTime));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns elapsed time in milliseconds since construction.
|
||||
*/
|
||||
elapsed(): number {
|
||||
return Date.now() - this.startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns timeout for an operation, capped by remaining budget.
|
||||
*
|
||||
* @param maxMs Maximum timeout for this operation
|
||||
* @param reserveMs Time to reserve for future operations (e.g., server bootstrap)
|
||||
* @returns Timeout in milliseconds (minimum 100ms to avoid instant failures)
|
||||
*/
|
||||
getTimeout(maxMs: number, reserveMs: number = 0): number {
|
||||
const available = this.remaining() - reserveMs;
|
||||
return Math.max(100, Math.min(maxMs, available));
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if there's enough time remaining for an operation.
|
||||
* @param requiredMs Time required in milliseconds
|
||||
*/
|
||||
hasTimeFor(requiredMs: number): boolean {
|
||||
return this.remaining() >= requiredMs;
|
||||
}
|
||||
}
|
||||
25
api/src/core/utils/misc/with-timeout.ts
Normal file
25
api/src/core/utils/misc/with-timeout.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
/**
|
||||
* Wraps a promise with a timeout to prevent hangs.
|
||||
* If the operation takes longer than timeoutMs, it rejects with a timeout error.
|
||||
*
|
||||
* @param promise The promise to wrap with a timeout
|
||||
* @param timeoutMs Maximum time in milliseconds before timing out
|
||||
* @param operationName Name of the operation for the error message
|
||||
* @returns The result of the promise if it completes in time
|
||||
* @throws Error if the operation times out
|
||||
*/
|
||||
export const withTimeout = <T>(
|
||||
promise: Promise<T>,
|
||||
timeoutMs: number,
|
||||
operationName: string
|
||||
): Promise<T> => {
|
||||
return Promise.race([
|
||||
promise,
|
||||
new Promise<never>((_, reject) =>
|
||||
setTimeout(
|
||||
() => reject(new Error(`${operationName} timed out after ${timeoutMs}ms`)),
|
||||
timeoutMs
|
||||
)
|
||||
),
|
||||
]);
|
||||
};
|
||||
19
api/src/core/utils/network.ts
Normal file
19
api/src/core/utils/network.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import { getters } from '@app/store/index.js';
|
||||
|
||||
/**
|
||||
* Returns the LAN IPv4 address reported by emhttp, if available.
|
||||
*/
|
||||
export function getLanIp(): string {
|
||||
const emhttp = getters.emhttp();
|
||||
const lanFromNetworks = emhttp?.networks?.[0]?.ipaddr?.[0];
|
||||
if (lanFromNetworks) {
|
||||
return lanFromNetworks;
|
||||
}
|
||||
|
||||
const lanFromNginx = emhttp?.nginx?.lanIp;
|
||||
if (lanFromNginx) {
|
||||
return lanFromNginx;
|
||||
}
|
||||
|
||||
return '';
|
||||
}
|
||||
17
api/src/core/utils/safe-mode.ts
Normal file
17
api/src/core/utils/safe-mode.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { store } from '@app/store/index.js';
|
||||
import { loadStateFileSync } from '@app/store/services/state-file-loader.js';
|
||||
import { StateFileKey } from '@app/store/types.js';
|
||||
|
||||
export const isSafeModeEnabled = (): boolean => {
|
||||
const safeModeFromStore = store.getState().emhttp?.var?.safeMode;
|
||||
if (typeof safeModeFromStore === 'boolean') {
|
||||
return safeModeFromStore;
|
||||
}
|
||||
|
||||
const varState = loadStateFileSync(StateFileKey.var);
|
||||
if (varState) {
|
||||
return Boolean(varState.safeMode);
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
@@ -111,5 +111,10 @@ export const PATHS_CONFIG_MODULES =
export const PATHS_LOCAL_SESSION_FILE =
    process.env.PATHS_LOCAL_SESSION_FILE ?? '/var/run/unraid-api/local-session';

export const PATHS_DOCKER_TEMPLATES = process.env.PATHS_DOCKER_TEMPLATES?.split(',') ?? [
    '/boot/config/plugins/dockerMan/templates-user',
    '/boot/config/plugins/dockerMan/templates',
];

/** feature flag for the upcoming docker release */
export const ENABLE_NEXT_DOCKER_RELEASE = process.env.ENABLE_NEXT_DOCKER_RELEASE === 'true';

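For illustration (hypothetical values): setting PATHS_DOCKER_TEMPLATES=/mnt/templates-a,/mnt/templates-b yields ['/mnt/templates-a', '/mnt/templates-b']; when unset, the two dockerMan defaults above are used. Note that split(',') does not trim whitespace around entries.
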
113
api/src/index.ts
@@ -15,28 +15,38 @@ import { WebSocket } from 'ws';

import { logger } from '@app/core/log.js';
import { fileExistsSync } from '@app/core/utils/files/file-exists.js';
import { TimeoutBudget } from '@app/core/utils/misc/timeout-budget.js';
import { withTimeout } from '@app/core/utils/misc/with-timeout.js';
import { getServerIdentifier } from '@app/core/utils/server-identifier.js';
import { environment, PATHS_CONFIG_MODULES, PORT } from '@app/environment.js';
import * as envVars from '@app/environment.js';
import { loadDynamixConfigFile } from '@app/store/actions/load-dynamix-config-file.js';
import { shutdownApiEvent } from '@app/store/actions/shutdown-api-event.js';
import { store } from '@app/store/index.js';
import { loadDynamixConfig, store } from '@app/store/index.js';
import { startMiddlewareListeners } from '@app/store/listeners/listener-middleware.js';
import { loadStateFiles } from '@app/store/modules/emhttp.js';
import { loadRegistrationKey } from '@app/store/modules/registration.js';
import { setupDynamixConfigWatch } from '@app/store/watch/dynamix-config-watch.js';
import { setupRegistrationKeyWatch } from '@app/store/watch/registration-watch.js';
import { StateManager } from '@app/store/watch/state-watch.js';

let server: NestFastifyApplication<RawServerDefault> | null = null;

// Total startup budget. Keep this coordinated with PM2's listen_timeout in
// ecosystem.config.json so our own timeout triggers before PM2 kills the process.
const TOTAL_STARTUP_BUDGET_MS = 30_000;
// Reserve time for the NestJS bootstrap (the most critical and time-consuming operation)
const BOOTSTRAP_RESERVED_MS = 20_000;
// Maximum time for any single pre-bootstrap operation
const MAX_OPERATION_TIMEOUT_MS = 5_000;

const unlinkUnixPort = () => {
    if (isNaN(parseInt(PORT, 10))) {
        if (fileExistsSync(PORT)) unlinkSync(PORT);
    }
};

export const viteNodeApp = async () => {
export const viteNodeApp = async (): Promise<NestFastifyApplication<RawServerDefault>> => {
    const budget = new TimeoutBudget(TOTAL_STARTUP_BUDGET_MS);

    try {
        await import('json-bigint-patch');
        environment.IS_MAIN_PROCESS = true;
@@ -44,15 +54,15 @@ export const viteNodeApp = async () => {
        /**------------------------------------------------------------------------
         * Attaching getServerIdentifier to globalThis
         *
         * getServerIdentifier is tightly coupled to the deprecated redux store,
         * which we don't want to share with other packages or plugins.
         *
         * At the same time, we need to use it in @unraid/shared as a building block,
         * where it's used & available outside of NestJS's DI context.
         *
         * Attaching to globalThis is a temporary solution to avoid refactoring
         * config sync & management outside of NestJS's DI context.
         *
         * Plugin authors should import getServerIdentifier from @unraid/shared instead,
         * to avoid breaking changes to their code.
         *------------------------------------------------------------------------**/
@@ -60,7 +70,18 @@ export const viteNodeApp = async () => {
        logger.info('ENV %o', envVars);
        logger.info('PATHS %o', store.getState().paths);

        await mkdir(PATHS_CONFIG_MODULES, { recursive: true });
        // Note: we use logger.info for checkpoints instead of a lower log level
        // to ensure emission during an unraid server's boot,
        // where the log level will be set to INFO by default.

        // Create config directory
        try {
            await mkdir(PATHS_CONFIG_MODULES, { recursive: true });
            logger.info('Config directory ready');
        } catch (error) {
            logger.error(error, 'Failed to create config directory');
            throw error;
        }

        const cacheable = new CacheableLookup();

@@ -70,32 +91,73 @@ export const viteNodeApp = async () => {
        cacheable.install(https.globalAgent);

        // Load emhttp state into store
        await store.dispatch(loadStateFiles());
        try {
            const timeout = budget.getTimeout(MAX_OPERATION_TIMEOUT_MS, BOOTSTRAP_RESERVED_MS);
            await withTimeout(store.dispatch(loadStateFiles()), timeout, 'loadStateFiles');
            logger.info('Emhttp state loaded');
        } catch (error) {
            logger.error(error, 'Failed to load emhttp state files');
            logger.warn('Continuing with default state');
        }

        // Load initial registration key into store
        await store.dispatch(loadRegistrationKey());
        try {
            const timeout = budget.getTimeout(MAX_OPERATION_TIMEOUT_MS, BOOTSTRAP_RESERVED_MS);
            await withTimeout(store.dispatch(loadRegistrationKey()), timeout, 'loadRegistrationKey');
            logger.info('Registration key loaded');
        } catch (error) {
            logger.error(error, 'Failed to load registration key');
            logger.warn('Continuing without registration key');
        }

        // Load my dynamix config file into store
        await store.dispatch(loadDynamixConfigFile());
        try {
            loadDynamixConfig();
            logger.info('Dynamix config loaded');
        } catch (error) {
            logger.error(error, 'Failed to load dynamix config');
            logger.warn('Continuing with default dynamix config');
        }

        // Start listening to file updates
        StateManager.getInstance();
        try {
            StateManager.getInstance();
            logger.info('State manager initialized');
        } catch (error) {
            logger.error(error, 'Failed to initialize state manager');
            logger.warn('Continuing without state watching');
        }

        // Start listening to key file changes
        setupRegistrationKeyWatch();

        // Start listening to dynamix config file changes
        setupDynamixConfigWatch();
        try {
            setupRegistrationKeyWatch();
            logger.info('Registration key watch active');
        } catch (error) {
            logger.error(error, 'Failed to setup registration key watch');
            logger.warn('Continuing without key file watching');
        }

        // If port is unix socket, delete old socket before starting http server
        unlinkUnixPort();

        startMiddlewareListeners();

        // Start webserver
        const { bootstrapNestServer } = await import('@app/unraid-api/main.js');

        server = await bootstrapNestServer();
        // Start webserver - use all remaining budget
        try {
            const bootstrapTimeout = budget.remaining();
            if (bootstrapTimeout < 1000) {
                logger.warn(
                    `Insufficient startup budget remaining (${bootstrapTimeout}ms) for NestJS bootstrap`
                );
            }
            logger.info('Bootstrapping NestJS server (budget: %dms)...', bootstrapTimeout);
            const { bootstrapNestServer } = await import('@app/unraid-api/main.js');
            server = await withTimeout(bootstrapNestServer(), bootstrapTimeout, 'bootstrapNestServer');
            logger.info('Startup complete in %dms', budget.elapsed());
        } catch (error) {
            logger.error(error, 'Failed to start NestJS server');
            throw error; // This is critical - must rethrow to trigger graceful exit
        }

        asyncExitHook(
            async (signal) => {
@@ -108,8 +170,10 @@ export const viteNodeApp = async () => {

                gracefulExit();
            },
            { wait: 9999 }
            { wait: 10_000 }
        );

        return server;
    } catch (error: unknown) {
        if (error instanceof Error) {
            logger.error(error, 'API-ERROR');
@@ -120,8 +184,9 @@ export const viteNodeApp = async () => {
            await server?.close?.();
        }
        shutdownApiEvent();
        // Kill application
        // Kill application - gracefulExit calls process.exit but TS doesn't know it never returns
        gracefulExit(1);
        throw new Error('Unreachable');
    }
};

@@ -1,12 +1,9 @@
import { F_OK } from 'constants';
import { access } from 'fs/promises';

import { createAsyncThunk } from '@reduxjs/toolkit';
import { createTtlMemoizedLoader } from '@unraid/shared';

import type { RecursivePartial } from '@app/types/index.js';
import { type DynamixConfig } from '@app/core/types/ini.js';
import { fileExistsSync } from '@app/core/utils/files/file-exists.js';
import { parseConfig } from '@app/core/utils/misc/parse-config.js';
import { type RecursiveNullable, type RecursivePartial } from '@app/types/index.js';
import { batchProcess } from '@app/utils.js';

/**
 * Loads a configuration file from disk, parses it to a RecursivePartial of the provided type, and returns it.
@@ -16,11 +13,8 @@ import { batchProcess } from '@app/utils.js';
 * @param path The path to the configuration file on disk.
 * @returns A parsed RecursivePartial of the provided type.
 */
async function loadConfigFile<ConfigType>(path: string): Promise<RecursivePartial<ConfigType>> {
    const fileIsAccessible = await access(path, F_OK)
        .then(() => true)
        .catch(() => false);
    return fileIsAccessible
function loadConfigFileSync<ConfigType>(path: string): RecursivePartial<ConfigType> {
    return fileExistsSync(path)
        ? parseConfig<RecursivePartial<ConfigType>>({
              filePath: path,
              type: 'ini',
@@ -28,21 +22,40 @@ async function loadConfigFile<ConfigType>(path: string): Promise<RecursivePartia
        : {};
}

/**
 * Load the dynamix.cfg into the store.
 *
 * Note: If the file doesn't exist this will fallback to default values.
 */
export const loadDynamixConfigFile = createAsyncThunk<
    RecursiveNullable<RecursivePartial<DynamixConfig>>,
    string | undefined
>('config/load-dynamix-config-file', async (filePath) => {
    if (filePath) {
        return loadConfigFile<DynamixConfig>(filePath);
    }
    const store = await import('@app/store/index.js');
    const paths = store.getters.paths()['dynamix-config'];
    const { data: configs } = await batchProcess(paths, (path) => loadConfigFile<DynamixConfig>(path));
    const [defaultConfig = {}, customConfig = {}] = configs;
    return { ...defaultConfig, ...customConfig };
type ConfigPaths = readonly (string | undefined | null)[];
const CACHE_WINDOW_MS = 250;

const memoizedConfigLoader = createTtlMemoizedLoader<
    RecursivePartial<DynamixConfig>,
    ConfigPaths,
    string
>({
    ttlMs: CACHE_WINDOW_MS,
    getCacheKey: (configPaths: ConfigPaths): string => JSON.stringify(configPaths),
    load: (configPaths: ConfigPaths) => {
        const validPaths = configPaths.filter((path): path is string => Boolean(path));
        if (validPaths.length === 0) {
            return {};
        }
        const configFiles = validPaths.map((path) => loadConfigFileSync<DynamixConfig>(path));
        return configFiles.reduce<RecursivePartial<DynamixConfig>>(
            (accumulator, configFile) => ({
                ...accumulator,
                ...configFile,
            }),
            {}
        );
    },
});

/**
 * Loads dynamix config from disk with TTL caching.
 *
 * @param configPaths - Array of config file paths to load and merge
 * @returns Merged config object from all valid paths
 */
export const loadDynamixConfigFromDiskSync = (
    configPaths: readonly (string | undefined | null)[]
): RecursivePartial<DynamixConfig> => {
    return memoizedConfigLoader.get(configPaths);
};

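A usage sketch (paths are hypothetical). Within the 250ms window, repeated calls with the same path tuple are served from cache instead of re-reading disk, and later entries win on key collisions, so a user config listed after a default config overrides it:

const paths = ['/tmp/default.cfg', '/tmp/dynamix.cfg']; // hypothetical
const first = loadDynamixConfigFromDiskSync(paths); // reads and merges from disk
const second = loadDynamixConfigFromDiskSync(paths); // same JSON.stringify key, cache hit
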
@@ -1,7 +1,11 @@
import { configureStore } from '@reduxjs/toolkit';

import { logger } from '@app/core/log.js';
import { loadDynamixConfigFromDiskSync } from '@app/store/actions/load-dynamix-config-file.js';
import { listenerMiddleware } from '@app/store/listeners/listener-middleware.js';
import { updateDynamixConfig } from '@app/store/modules/dynamix.js';
import { rootReducer } from '@app/store/root-reducer.js';
import { FileLoadStatus } from '@app/store/types.js';

export const store = configureStore({
    reducer: rootReducer,
@@ -15,8 +19,36 @@ export type RootState = ReturnType<typeof store.getState>;
export type AppDispatch = typeof store.dispatch;
export type ApiStore = typeof store;

// loadDynamixConfig lives here rather than in actions/load-dynamix-config-file.js because it needs
// access to the store, and injecting the store there seemed circular and convoluted for this use case.
/**
 * Loads the dynamix config into the store.
 * Can be called multiple times - uses TTL caching internally.
 * @returns The loaded dynamix config.
 */
export const loadDynamixConfig = () => {
    const configPaths = store.getState().paths['dynamix-config'] ?? [];
    try {
        const config = loadDynamixConfigFromDiskSync(configPaths);
        store.dispatch(
            updateDynamixConfig({
                ...config,
                status: FileLoadStatus.LOADED,
            })
        );
    } catch (error) {
        logger.error(error, 'Failed to load dynamix config from disk');
        store.dispatch(
            updateDynamixConfig({
                status: FileLoadStatus.FAILED_LOADING,
            })
        );
    }
    return store.getState().dynamix;
};

export const getters = {
    dynamix: () => store.getState().dynamix,
    dynamix: () => loadDynamixConfig(),
    emhttp: () => store.getState().emhttp,
    paths: () => store.getState().paths,
    registration: () => store.getState().registration,

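Note the behavioral shift in the getter: getters.dynamix() is no longer a pure state read; it synchronously refreshes the slice from disk (bounded by the loader's 250ms TTL) before returning it. A sketch:

const dynamix = getters.dynamix();
if (dynamix.status === FileLoadStatus.FAILED_LOADING) {
    // The latest disk read failed; previously loaded values remain in the
    // slice alongside the FAILED_LOADING status, since the reducer merges.
}
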
@@ -2,7 +2,6 @@ import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';

import { type DynamixConfig } from '@app/core/types/ini.js';
import { loadDynamixConfigFile } from '@app/store/actions/load-dynamix-config-file.js';
import { FileLoadStatus } from '@app/store/types.js';
import { RecursivePartial } from '@app/types/index.js';

@@ -22,24 +21,6 @@ export const dynamix = createSlice({
        return Object.assign(state, action.payload);
    },
},
extraReducers(builder) {
    builder.addCase(loadDynamixConfigFile.pending, (state) => {
        state.status = FileLoadStatus.LOADING;
    });

    builder.addCase(loadDynamixConfigFile.fulfilled, (state, action) => {
        return {
            ...(action.payload as DynamixConfig),
            status: FileLoadStatus.LOADED,
        };
    });

    builder.addCase(loadDynamixConfigFile.rejected, (state, action) => {
        Object.assign(state, action.payload, {
            status: FileLoadStatus.FAILED_LOADING,
        });
    });
},
});

export const { updateDynamixConfig } = dynamix.actions;

@@ -163,6 +163,18 @@ export const loadStateFiles = createAsyncThunk<
    return state;
});

const stateFieldKeyMap: Record<StateFileKey, keyof SliceState> = {
    [StateFileKey.var]: 'var',
    [StateFileKey.devs]: 'devices',
    [StateFileKey.network]: 'networks',
    [StateFileKey.nginx]: 'nginx',
    [StateFileKey.shares]: 'shares',
    [StateFileKey.disks]: 'disks',
    [StateFileKey.users]: 'users',
    [StateFileKey.sec]: 'smbShares',
    [StateFileKey.sec_nfs]: 'nfsShares',
};

export const emhttp = createSlice({
    name: 'emhttp',
    initialState,
@@ -175,7 +187,8 @@ export const emhttp = createSlice({
        }>
    ) {
        const { field } = action.payload;
        return Object.assign(state, { [field]: action.payload.state });
        const targetField = stateFieldKeyMap[field] ?? (field as keyof SliceState);
        return Object.assign(state, { [targetField]: action.payload.state });
    },
},
extraReducers(builder) {

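The map matters because several StateFileKey values (devs, network, sec, sec_nfs) differ from their slice field names, so the previous reducer wrote updates onto keys like state['devs'] rather than the state.devices field the rest of the app reads. A sketch of the corrected dispatch (parsedDevices is a hypothetical parser result):

store.dispatch(
    updateEmhttpState({
        field: StateFileKey.devs,
        state: parsedDevices, // now lands on state.devices via stateFieldKeyMap
    })
);
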
@@ -20,6 +20,7 @@ const initialState = {
        process.env.PATHS_UNRAID_DATA ?? ('/boot/config/plugins/dynamix.my.servers/data/' as const)
    ),
    'docker-autostart': '/var/lib/docker/unraid-autostart' as const,
    'docker-userprefs': '/boot/config/plugins/dockerMan/userprefs.cfg' as const,
    'docker-socket': '/var/run/docker.sock' as const,
    'rclone-socket': resolvePath(process.env.PATHS_RCLONE_SOCKET ?? ('/var/run/rclone.socket' as const)),
    'parity-checks': resolvePath(

81
api/src/store/services/__test__/state-file-loader.test.ts
Normal file
@@ -0,0 +1,81 @@
import { mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { store } from '@app/store/index.js';
import { loadStateFileSync } from '@app/store/services/state-file-loader.js';
import { StateFileKey } from '@app/store/types.js';

const VAR_FIXTURE = readFileSync(new URL('../../../../dev/states/var.ini', import.meta.url), 'utf-8');

const writeVarFixture = (dir: string, safeMode: 'yes' | 'no') => {
    const content = VAR_FIXTURE.replace(/safeMode="(yes|no)"/, `safeMode="${safeMode}"`);
    writeFileSync(join(dir, `${StateFileKey.var}.ini`), content);
};

describe('loadStateFileSync', () => {
    let tempDir: string;
    let baseState: ReturnType<typeof store.getState>;

    beforeEach(() => {
        tempDir = mkdtempSync(join(tmpdir(), 'state-file-'));
        baseState = store.getState();
    });

    afterEach(() => {
        vi.restoreAllMocks();
        rmSync(tempDir, { recursive: true, force: true });
    });

    it('loads var.ini, updates the store, and returns the parsed state', () => {
        writeVarFixture(tempDir, 'yes');
        vi.spyOn(store, 'getState').mockReturnValue({
            ...baseState,
            paths: {
                ...baseState.paths,
                states: tempDir,
            },
        });
        const dispatchSpy = vi.spyOn(store, 'dispatch').mockImplementation((action) => action as any);

        const result = loadStateFileSync(StateFileKey.var);

        expect(result?.safeMode).toBe(true);
        expect(dispatchSpy).toHaveBeenCalledWith(
            expect.objectContaining({
                type: 'emhttp/updateEmhttpState',
                payload: {
                    field: StateFileKey.var,
                    state: expect.objectContaining({ safeMode: true }),
                },
            })
        );
    });

    it('returns null when the states path is missing', () => {
        vi.spyOn(store, 'getState').mockReturnValue({
            ...baseState,
            paths: undefined,
        } as any);
        const dispatchSpy = vi.spyOn(store, 'dispatch');

        expect(loadStateFileSync(StateFileKey.var)).toBeNull();
        expect(dispatchSpy).not.toHaveBeenCalled();
    });

    it('returns null when the requested state file cannot be found', () => {
        vi.spyOn(store, 'getState').mockReturnValue({
            ...baseState,
            paths: {
                ...baseState.paths,
                states: tempDir,
            },
        });
        const dispatchSpy = vi.spyOn(store, 'dispatch');

        expect(loadStateFileSync(StateFileKey.var)).toBeNull();
        expect(dispatchSpy).not.toHaveBeenCalled();
    });
});

81
api/src/store/services/state-file-loader.ts
Normal file
@@ -0,0 +1,81 @@
import { join } from 'node:path';

import type { SliceState } from '@app/store/modules/emhttp.js';
import type { StateFileToIniParserMap } from '@app/store/types.js';
import { parseConfig } from '@app/core/utils/misc/parse-config.js';
import { store } from '@app/store/index.js';
import { updateEmhttpState } from '@app/store/modules/emhttp.js';
import { parse as parseDevices } from '@app/store/state-parsers/devices.js';
import { parse as parseNetwork } from '@app/store/state-parsers/network.js';
import { parse as parseNfs } from '@app/store/state-parsers/nfs.js';
import { parse as parseNginx } from '@app/store/state-parsers/nginx.js';
import { parse as parseShares } from '@app/store/state-parsers/shares.js';
import { parse as parseSlots } from '@app/store/state-parsers/slots.js';
import { parse as parseSmb } from '@app/store/state-parsers/smb.js';
import { parse as parseUsers } from '@app/store/state-parsers/users.js';
import { parse as parseVar } from '@app/store/state-parsers/var.js';
import { StateFileKey } from '@app/store/types.js';

type ParserReturnMap = {
    [StateFileKey.var]: ReturnType<typeof parseVar>;
    [StateFileKey.devs]: ReturnType<typeof parseDevices>;
    [StateFileKey.network]: ReturnType<typeof parseNetwork>;
    [StateFileKey.nginx]: ReturnType<typeof parseNginx>;
    [StateFileKey.shares]: ReturnType<typeof parseShares>;
    [StateFileKey.disks]: ReturnType<typeof parseSlots>;
    [StateFileKey.users]: ReturnType<typeof parseUsers>;
    [StateFileKey.sec]: ReturnType<typeof parseSmb>;
    [StateFileKey.sec_nfs]: ReturnType<typeof parseNfs>;
};

const PARSER_MAP: { [K in StateFileKey]: StateFileToIniParserMap[K] } = {
    [StateFileKey.var]: parseVar,
    [StateFileKey.devs]: parseDevices,
    [StateFileKey.network]: parseNetwork,
    [StateFileKey.nginx]: parseNginx,
    [StateFileKey.shares]: parseShares,
    [StateFileKey.disks]: parseSlots,
    [StateFileKey.users]: parseUsers,
    [StateFileKey.sec]: parseSmb,
    [StateFileKey.sec_nfs]: parseNfs,
};

/**
 * Synchronously loads an emhttp state file, updates the Redux store slice, and returns the parsed state.
 *
 * Designed for bootstrap contexts (CLI, plugin loading, etc.) where dispatching the async thunks is
 * impractical but we still need authoritative emhttp state from disk.
 */
export const loadStateFileSync = <K extends StateFileKey>(
    stateFileKey: K
): ParserReturnMap[K] | null => {
    const state = store.getState();
    const statesDirectory = state.paths?.states;

    if (!statesDirectory) {
        return null;
    }

    const filePath = join(statesDirectory, `${stateFileKey}.ini`);

    try {
        const parser = PARSER_MAP[stateFileKey] as StateFileToIniParserMap[K];
        const rawConfig = parseConfig<Record<string, unknown>>({
            filePath,
            type: 'ini',
        });
        const config = rawConfig as Parameters<StateFileToIniParserMap[K]>[0];
        const parsed = (parser as (input: any) => ParserReturnMap[K])(config);

        store.dispatch(
            updateEmhttpState({
                field: stateFileKey,
                state: parsed as Partial<SliceState[keyof SliceState]>,
            })
        );

        return parsed;
    } catch (error) {
        return null;
    }
};

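A usage sketch, mirroring the safe-mode helper in api/src/core/utils/safe-mode.ts above:

// Returns the typed parse result for var.ini, or null if the states path or
// file is unavailable; the store's emhttp slice is updated as a side effect.
const varState = loadStateFileSync(StateFileKey.var);
if (varState?.safeMode) {
    // react to safe mode
}
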
@@ -1,17 +0,0 @@
import { watch } from 'chokidar';

import { loadDynamixConfigFile } from '@app/store/actions/load-dynamix-config-file.js';
import { getters, store } from '@app/store/index.js';

export const setupDynamixConfigWatch = () => {
    const configPath = getters.paths()?.['dynamix-config'];

    // Update store when cfg changes
    watch(configPath, {
        persistent: true,
        ignoreInitial: true,
    }).on('change', async () => {
        // Load updated dynamix config file into store
        await store.dispatch(loadDynamixConfigFile());
    });
};

@@ -1,17 +1,51 @@
import { watch } from 'chokidar';

import { CHOKIDAR_USEPOLLING } from '@app/environment.js';
import { store } from '@app/store/index.js';
import { keyServerLogger } from '@app/core/log.js';
import { getters, store } from '@app/store/index.js';
import { loadSingleStateFile } from '@app/store/modules/emhttp.js';
import { loadRegistrationKey } from '@app/store/modules/registration.js';
import { StateFileKey } from '@app/store/types.js';

/**
 * Reloads var.ini with retry logic to handle timing issues with emhttpd.
 * When a key file changes, emhttpd needs time to process it and update var.ini.
 * This function retries loading var.ini until the registration state changes
 * or max retries are exhausted.
 */
export const reloadVarIniWithRetry = async (maxRetries = 3): Promise<void> => {
    const beforeState = getters.emhttp().var?.regTy;

    for (let attempt = 0; attempt < maxRetries; attempt++) {
        const delay = 500 * Math.pow(2, attempt); // 500ms, 1s, 2s
        await new Promise((resolve) => setTimeout(resolve, delay));

        await store.dispatch(loadSingleStateFile(StateFileKey.var));

        const afterState = getters.emhttp().var?.regTy;
        if (beforeState !== afterState) {
            keyServerLogger.info('Registration state updated: %s -> %s', beforeState, afterState);
            return;
        }
        keyServerLogger.debug('Retry %d: var.ini regTy still %s', attempt + 1, afterState);
    }
    keyServerLogger.debug('var.ini regTy unchanged after %d retries (may be expected)', maxRetries);
};

export const setupRegistrationKeyWatch = () => {
    // IMPORTANT: /boot/config is on a FAT32 flash drive, which does NOT support inotify.
    // Polling must be used to detect file changes on FAT32 filesystems.
    watch('/boot/config', {
        persistent: true,
        ignoreInitial: true,
        ignored: (path: string) => !path.endsWith('.key'),
        usePolling: CHOKIDAR_USEPOLLING === true,
    }).on('all', async () => {
        // Load updated key into store
        usePolling: true, // Required for FAT32 - inotify doesn't work
        interval: 5000, // Poll every 5 seconds (balance between responsiveness and CPU usage)
    }).on('all', async (event, path) => {
        keyServerLogger.info('Key file %s: %s', event, path);

        await store.dispatch(loadRegistrationKey());

        // Reload var.ini to get updated registration metadata from emhttpd
        await reloadVarIniWithRetry();
    });
};

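Worst case, the retry loop above sleeps 500ms + 1s + 2s, about 3.5 seconds in total, before logging that regTy is unchanged, so a key change should be reflected within a few seconds of emhttpd rewriting var.ini.
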
@@ -6,103 +6,60 @@ import { AuthZGuard } from 'nest-authz';
import request from 'supertest';
import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest';

import { loadDynamixConfigFile } from '@app/store/actions/load-dynamix-config-file.js';
import { store } from '@app/store/index.js';
import { loadStateFiles } from '@app/store/modules/emhttp.js';
import { AppModule } from '@app/unraid-api/app/app.module.js';
import { AuthService } from '@app/unraid-api/auth/auth.service.js';
import { AuthenticationGuard } from '@app/unraid-api/auth/authentication.guard.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';

// Mock external system boundaries that we can't control in tests
vi.mock('dockerode', () => {
    return {
        default: vi.fn().mockImplementation(() => ({
            listContainers: vi.fn().mockResolvedValue([
                {
                    Id: 'test-container-1',
                    Names: ['/test-container'],
                    State: 'running',
                    Status: 'Up 5 minutes',
                    Image: 'test:latest',
                    Command: 'node server.js',
                    Created: Date.now() / 1000,
                    Ports: [
                        {
                            IP: '0.0.0.0',
                            PrivatePort: 3000,
                            PublicPort: 3000,
                            Type: 'tcp',
                        },
                    ],
                    Labels: {},
                    HostConfig: {
                        NetworkMode: 'bridge',
                    },
                    NetworkSettings: {
                        Networks: {},
                    },
                    Mounts: [],
// Mock the store before importing it
vi.mock('@app/store/index.js', () => ({
    store: {
        dispatch: vi.fn().mockResolvedValue(undefined),
        subscribe: vi.fn().mockImplementation(() => vi.fn()),
        getState: vi.fn().mockReturnValue({
            emhttp: {
                var: {
                    csrfToken: 'test-csrf-token',
                },
            ]),
            getContainer: vi.fn().mockImplementation((id) => ({
                inspect: vi.fn().mockResolvedValue({
                    Id: id,
                    Name: '/test-container',
                    State: { Running: true },
                    Config: { Image: 'test:latest' },
                }),
            })),
            listImages: vi.fn().mockResolvedValue([]),
            listNetworks: vi.fn().mockResolvedValue([]),
            listVolumes: vi.fn().mockResolvedValue({ Volumes: [] }),
        })),
    };
});

// Mock external command execution
vi.mock('execa', () => ({
    execa: vi.fn().mockImplementation((cmd) => {
        if (cmd === 'whoami') {
            return Promise.resolve({ stdout: 'testuser' });
        }
        return Promise.resolve({ stdout: 'mocked output' });
    }),
            },
            docker: {
                containers: [],
                autostart: [],
            },
        }),
        unsubscribe: vi.fn(),
    },
    getters: {
        emhttp: vi.fn().mockReturnValue({
            var: {
                csrfToken: 'test-csrf-token',
            },
        }),
        docker: vi.fn().mockReturnValue({
            containers: [],
            autostart: [],
        }),
        paths: vi.fn().mockReturnValue({
            'docker-autostart': '/tmp/docker-autostart',
            'docker-socket': '/var/run/docker.sock',
            'var-run': '/var/run',
            'auth-keys': '/tmp/auth-keys',
            activationBase: '/tmp/activation',
            'dynamix-config': ['/tmp/dynamix-config', '/tmp/dynamix-config'],
            identConfig: '/tmp/ident.cfg',
        }),
        dynamix: vi.fn().mockReturnValue({
            notify: {
                path: '/tmp/notifications',
            },
        }),
    },
    loadDynamixConfig: vi.fn(),
    loadStateFiles: vi.fn().mockResolvedValue(undefined),
}));

// Mock child_process for services that spawn processes
vi.mock('node:child_process', () => ({
    spawn: vi.fn(() => ({
        on: vi.fn(),
        kill: vi.fn(),
        stdout: { on: vi.fn() },
        stderr: { on: vi.fn() },
    })),
}));

// Mock file system operations that would fail in test environment
vi.mock('node:fs/promises', async (importOriginal) => {
    const actual = await importOriginal<typeof import('fs/promises')>();
    return {
        ...actual,
        readFile: vi.fn().mockResolvedValue(''),
        writeFile: vi.fn().mockResolvedValue(undefined),
        mkdir: vi.fn().mockResolvedValue(undefined),
        access: vi.fn().mockResolvedValue(undefined),
        stat: vi.fn().mockResolvedValue({ isFile: () => true }),
        readdir: vi.fn().mockResolvedValue([]),
        rename: vi.fn().mockResolvedValue(undefined),
        unlink: vi.fn().mockResolvedValue(undefined),
    };
});

// Mock fs module for synchronous operations
vi.mock('node:fs', () => ({
    existsSync: vi.fn().mockReturnValue(false),
    readFileSync: vi.fn().mockReturnValue(''),
    writeFileSync: vi.fn(),
    mkdirSync: vi.fn(),
    readdirSync: vi.fn().mockReturnValue([]),
// Mock fs-extra for directory operations
vi.mock('fs-extra', () => ({
    ensureDirSync: vi.fn().mockReturnValue(undefined),
}));

describe('AppModule Integration Tests', () => {
@@ -110,14 +67,6 @@ describe('AppModule Integration Tests', () => {
    let moduleRef: TestingModule;

    beforeAll(async () => {
        // Initialize the dynamix config and state files before creating the module
        await store.dispatch(loadDynamixConfigFile());
        await store.dispatch(loadStateFiles());

        // Debug: Log the CSRF token from the store
        const { getters } = await import('@app/store/index.js');
        console.log('CSRF Token from store:', getters.emhttp().var.csrfToken);

        moduleRef = await Test.createTestingModule({
            imports: [AppModule],
        })
@@ -150,14 +99,6 @@ describe('AppModule Integration Tests', () => {
                roles: ['admin'],
            }),
        })
        // Override Redis client
        .overrideProvider('REDIS_CLIENT')
        .useValue({
            get: vi.fn(),
            set: vi.fn(),
            del: vi.fn(),
            connect: vi.fn(),
        })
        .compile();

        app = moduleRef.createNestApplication<NestFastifyApplication>(new FastifyAdapter());
@@ -178,9 +119,9 @@ describe('AppModule Integration Tests', () => {
    });

    it('should resolve core services', () => {
        const dockerService = moduleRef.get(DockerService);
        const authService = moduleRef.get(AuthService);

        expect(dockerService).toBeDefined();
        expect(authService).toBeDefined();
    });
});
@@ -239,18 +180,12 @@ describe('AppModule Integration Tests', () => {
    });

    describe('Service Integration', () => {
        it('should have working service-to-service communication', async () => {
            const dockerService = moduleRef.get(DockerService);

            // Test that the service can be called and returns expected data structure
            const containers = await dockerService.getContainers();

            expect(containers).toBeInstanceOf(Array);
            // The containers might be empty or cached, just verify structure
            if (containers.length > 0) {
                expect(containers[0]).toHaveProperty('id');
                expect(containers[0]).toHaveProperty('names');
            }
        it('should have working service-to-service communication', () => {
            // Test that the module can resolve its services without errors
            // This validates that dependency injection is working correctly
            const authService = moduleRef.get(AuthService);
            expect(authService).toBeDefined();
            expect(typeof authService.validateCookiesWithCsrfToken).toBe('function');
        });
    });
});

@@ -1,4 +1,5 @@
import { CacheModule } from '@nestjs/cache-manager';
import { ConfigModule } from '@nestjs/config';
import { Test } from '@nestjs/testing';

import { describe, expect, it } from 'vitest';
@@ -10,7 +11,11 @@ describe('Module Dependencies Integration', () => {
    let module;
    try {
        module = await Test.createTestingModule({
            imports: [CacheModule.register({ isGlobal: true }), RestModule],
            imports: [
                ConfigModule.forRoot({ ignoreEnvFile: true, isGlobal: true }),
                CacheModule.register({ isGlobal: true }),
                RestModule,
            ],
        }).compile();

        expect(module).toBeDefined();

@@ -183,6 +183,11 @@ export class ApiKeyService implements OnModuleInit {

    async loadAllFromDisk(): Promise<ApiKey[]> {
        const files = await readdir(this.basePath).catch((error) => {
            if (error.code === 'ENOENT') {
                // Directory doesn't exist, which means no API keys have been created yet
                this.logger.error(`API key directory does not exist: ${this.basePath}`);
                return [];
            }
            this.logger.error(`Failed to read API key directory: ${error}`);
            throw new Error('Failed to list API keys');
        });

@@ -36,6 +36,7 @@ const mockPluginManagementService = {
    addPlugin: vi.fn(),
    addBundledPlugin: vi.fn(),
    removePlugin: vi.fn(),
    removePluginConfigOnly: vi.fn(),
    removeBundledPlugin: vi.fn(),
    plugins: [] as string[],
};
@@ -147,6 +148,7 @@ describe('Plugin Commands', () => {
            '@unraid/plugin-example',
            '@unraid/plugin-test'
        );
        expect(mockPluginManagementService.removePluginConfigOnly).not.toHaveBeenCalled();
        expect(mockLogger.log).toHaveBeenCalledWith('Removed plugin @unraid/plugin-example');
        expect(mockLogger.log).toHaveBeenCalledWith('Removed plugin @unraid/plugin-test');
        expect(mockApiConfigPersistence.persist).toHaveBeenCalled();
@@ -178,9 +180,72 @@ describe('Plugin Commands', () => {
            expect(mockPluginManagementService.removePlugin).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
            expect(mockPluginManagementService.removePluginConfigOnly).not.toHaveBeenCalled();
            expect(mockApiConfigPersistence.persist).toHaveBeenCalled();
            expect(mockRestartCommand.run).not.toHaveBeenCalled();
        });

        it('should bypass npm uninstall when bypass flag is provided', async () => {
            mockInquirerService.prompt.mockResolvedValue({
                plugins: ['@unraid/plugin-example'],
                restart: true,
                bypassNpm: true,
            });

            await command.run([], { restart: true, bypassNpm: true });

            expect(mockPluginManagementService.removePluginConfigOnly).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
            expect(mockPluginManagementService.removePlugin).not.toHaveBeenCalled();
        });

        it('should preserve cli flags when prompt supplies plugins', async () => {
            mockInquirerService.prompt.mockResolvedValue({
                plugins: ['@unraid/plugin-example'],
            });

            await command.run([], { restart: false, bypassNpm: true });

            expect(mockPluginManagementService.removePluginConfigOnly).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
            expect(mockPluginManagementService.removePlugin).not.toHaveBeenCalled();
            expect(mockRestartCommand.run).not.toHaveBeenCalled();
        });

        it('should honor prompt restart value when cli flag not provided', async () => {
            mockInquirerService.prompt.mockResolvedValue({
                plugins: ['@unraid/plugin-example'],
                restart: false,
            });

            await command.run([], {});

            expect(mockPluginManagementService.removePlugin).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
            expect(mockRestartCommand.run).not.toHaveBeenCalled();
        });

        it('should respect passed params and skip inquirer', async () => {
            await command.run(['@unraid/plugin-example'], { restart: true, bypassNpm: false });

            expect(mockInquirerService.prompt).not.toHaveBeenCalled();
            expect(mockPluginManagementService.removePlugin).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
        });

        it('should bypass npm when flag provided with passed params', async () => {
            await command.run(['@unraid/plugin-example'], { restart: true, bypassNpm: true });

            expect(mockInquirerService.prompt).not.toHaveBeenCalled();
            expect(mockPluginManagementService.removePluginConfigOnly).toHaveBeenCalledWith(
                '@unraid/plugin-example'
            );
            expect(mockPluginManagementService.removePlugin).not.toHaveBeenCalled();
        });
    });

    describe('ListPluginCommand', () => {

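For context, the behavior exercised above corresponds to programmatic invocations like the following (a sketch; defaults follow the option-merging logic shown in plugin.command.ts at the end of this section):

// Positional args skip the inquirer prompt entirely:
await command.run(['@unraid/plugin-example'], {}); // defaults: npm uninstall, restart afterwards
await command.run(['@unraid/plugin-example'], { bypassNpm: true }); // config-only removal, no npm uninstall
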
@@ -525,6 +525,7 @@ export enum ContainerPortType {

export enum ContainerState {
    EXITED = 'EXITED',
    PAUSED = 'PAUSED',
    RUNNING = 'RUNNING'
}

@@ -678,11 +679,20 @@ export enum DiskSmartStatus {

export type Docker = Node & {
    __typename?: 'Docker';
    container?: Maybe<DockerContainer>;
    containerUpdateStatuses: Array<ExplicitStatusItem>;
    containers: Array<DockerContainer>;
    id: Scalars['PrefixedID']['output'];
    /** Access container logs. Requires specifying a target container id through resolver arguments. */
    logs: DockerContainerLogs;
    networks: Array<DockerNetwork>;
    organizer: ResolvedOrganizerV1;
    portConflicts: DockerPortConflicts;
};


export type DockerContainerArgs = {
    id: Scalars['PrefixedID']['input'];
};


@@ -691,38 +701,169 @@ export type DockerContainersArgs = {
};


export type DockerLogsArgs = {
    id: Scalars['PrefixedID']['input'];
    since?: InputMaybe<Scalars['DateTime']['input']>;
    tail?: InputMaybe<Scalars['Int']['input']>;
};


export type DockerNetworksArgs = {
    skipCache?: Scalars['Boolean']['input'];
};


export type DockerOrganizerArgs = {
    skipCache?: Scalars['Boolean']['input'];
};


export type DockerPortConflictsArgs = {
    skipCache?: Scalars['Boolean']['input'];
};

export type DockerAutostartEntryInput = {
    /** Whether the container should auto-start */
    autoStart: Scalars['Boolean']['input'];
    /** Docker container identifier */
    id: Scalars['PrefixedID']['input'];
    /** Number of seconds to wait after starting the container */
    wait?: InputMaybe<Scalars['Int']['input']>;
};

export type DockerContainer = Node & {
    __typename?: 'DockerContainer';
    autoStart: Scalars['Boolean']['output'];
    /** Zero-based order in the auto-start list */
    autoStartOrder?: Maybe<Scalars['Int']['output']>;
    /** Wait time in seconds applied after start */
    autoStartWait?: Maybe<Scalars['Int']['output']>;
    command: Scalars['String']['output'];
    created: Scalars['Int']['output'];
    hostConfig?: Maybe<ContainerHostConfig>;
    /** Icon URL */
    iconUrl?: Maybe<Scalars['String']['output']>;
    id: Scalars['PrefixedID']['output'];
    image: Scalars['String']['output'];
    imageId: Scalars['String']['output'];
    /** Whether the container is orphaned (no template found) */
    isOrphaned: Scalars['Boolean']['output'];
    isRebuildReady?: Maybe<Scalars['Boolean']['output']>;
    isUpdateAvailable?: Maybe<Scalars['Boolean']['output']>;
    labels?: Maybe<Scalars['JSON']['output']>;
    /** List of LAN-accessible host:port values */
    lanIpPorts?: Maybe<Array<Scalars['String']['output']>>;
    mounts?: Maybe<Array<Scalars['JSON']['output']>>;
    names: Array<Scalars['String']['output']>;
    networkSettings?: Maybe<Scalars['JSON']['output']>;
    ports: Array<ContainerPort>;
    /** Project/Product homepage URL */
    projectUrl?: Maybe<Scalars['String']['output']>;
    /** Registry/Docker Hub URL */
    registryUrl?: Maybe<Scalars['String']['output']>;
    /** Shell to use for console access (from template) */
    shell?: Maybe<Scalars['String']['output']>;
    /** Size of container logs (in bytes) */
    sizeLog?: Maybe<Scalars['BigInt']['output']>;
    /** Total size of all files in the container (in bytes) */
    sizeRootFs?: Maybe<Scalars['BigInt']['output']>;
    /** Size of writable layer (in bytes) */
    sizeRw?: Maybe<Scalars['BigInt']['output']>;
    state: ContainerState;
    status: Scalars['String']['output'];
    /** Support page/thread URL */
    supportUrl?: Maybe<Scalars['String']['output']>;
    /** Whether Tailscale is enabled for this container */
    tailscaleEnabled: Scalars['Boolean']['output'];
    /** Tailscale status for this container (fetched via docker exec) */
    tailscaleStatus?: Maybe<TailscaleStatus>;
    templatePath?: Maybe<Scalars['String']['output']>;
    /** Port mappings from template (used when container is not running) */
    templatePorts?: Maybe<Array<ContainerPort>>;
    /** Resolved WebUI URL from template */
    webUiUrl?: Maybe<Scalars['String']['output']>;
};


export type DockerContainerTailscaleStatusArgs = {
    forceRefresh?: InputMaybe<Scalars['Boolean']['input']>;
};

export type DockerContainerLogLine = {
    __typename?: 'DockerContainerLogLine';
    message: Scalars['String']['output'];
    timestamp: Scalars['DateTime']['output'];
};

export type DockerContainerLogs = {
    __typename?: 'DockerContainerLogs';
    containerId: Scalars['PrefixedID']['output'];
    /** Cursor that can be passed back through the since argument to continue streaming logs. */
    cursor?: Maybe<Scalars['DateTime']['output']>;
    lines: Array<DockerContainerLogLine>;
};

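Given these types, a client can page container logs by feeding the returned cursor back through the since argument on the next request. A sketch (it assumes the Docker node is exposed through a docker query field, which is not shown in this hunk):

const CONTAINER_LOGS = /* GraphQL */ `
    query ContainerLogs($id: PrefixedID!, $since: DateTime) {
        docker {
            logs(id: $id, since: $since, tail: 500) {
                cursor # pass back as $since to continue where this page ended
                lines {
                    timestamp
                    message
                }
            }
        }
    }
`;
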
export type DockerContainerPortConflict = {
    __typename?: 'DockerContainerPortConflict';
    containers: Array<DockerPortConflictContainer>;
    privatePort: Scalars['Port']['output'];
    type: ContainerPortType;
};

export type DockerContainerStats = {
    __typename?: 'DockerContainerStats';
    /** Block I/O String (e.g. 100MB / 1GB) */
    blockIO: Scalars['String']['output'];
    /** CPU Usage Percentage */
    cpuPercent: Scalars['Float']['output'];
    id: Scalars['PrefixedID']['output'];
    /** Memory Usage Percentage */
    memPercent: Scalars['Float']['output'];
    /** Memory Usage String (e.g. 100MB / 1GB) */
    memUsage: Scalars['String']['output'];
    /** Network I/O String (e.g. 100MB / 1GB) */
    netIO: Scalars['String']['output'];
};

export type DockerLanPortConflict = {
    __typename?: 'DockerLanPortConflict';
    containers: Array<DockerPortConflictContainer>;
    lanIpPort: Scalars['String']['output'];
    publicPort?: Maybe<Scalars['Port']['output']>;
    type: ContainerPortType;
};

export type DockerMutations = {
    __typename?: 'DockerMutations';
    /** Pause (Suspend) a container */
    pause: DockerContainer;
    /** Remove a container */
    removeContainer: Scalars['Boolean']['output'];
    /** Start a container */
    start: DockerContainer;
    /** Stop a container */
    stop: DockerContainer;
    /** Unpause (Resume) a container */
    unpause: DockerContainer;
    /** Update all containers that have available updates */
    updateAllContainers: Array<DockerContainer>;
    /** Update auto-start configuration for Docker containers */
    updateAutostartConfiguration: Scalars['Boolean']['output'];
    /** Update a container to the latest image */
    updateContainer: DockerContainer;
    /** Update multiple containers to the latest images */
    updateContainers: Array<DockerContainer>;
};


export type DockerMutationsPauseArgs = {
    id: Scalars['PrefixedID']['input'];
};


export type DockerMutationsRemoveContainerArgs = {
    id: Scalars['PrefixedID']['input'];
    withImage?: InputMaybe<Scalars['Boolean']['input']>;
};


@@ -735,6 +876,27 @@ export type DockerMutationsStopArgs = {
    id: Scalars['PrefixedID']['input'];
};


export type DockerMutationsUnpauseArgs = {
    id: Scalars['PrefixedID']['input'];
};


export type DockerMutationsUpdateAutostartConfigurationArgs = {
    entries: Array<DockerAutostartEntryInput>;
    persistUserPreferences?: InputMaybe<Scalars['Boolean']['input']>;
};


export type DockerMutationsUpdateContainerArgs = {
    id: Scalars['PrefixedID']['input'];
};


export type DockerMutationsUpdateContainersArgs = {
    ids: Array<Scalars['PrefixedID']['input']>;
};

export type DockerNetwork = Node & {
    __typename?: 'DockerNetwork';
    attachable: Scalars['Boolean']['output'];
@@ -754,6 +916,26 @@ export type DockerNetwork = Node & {
    scope: Scalars['String']['output'];
};

export type DockerPortConflictContainer = {
    __typename?: 'DockerPortConflictContainer';
    id: Scalars['PrefixedID']['output'];
    name: Scalars['String']['output'];
};

export type DockerPortConflicts = {
    __typename?: 'DockerPortConflicts';
    containerPorts: Array<DockerContainerPortConflict>;
    lanPorts: Array<DockerLanPortConflict>;
};

export type DockerTemplateSyncResult = {
    __typename?: 'DockerTemplateSyncResult';
    errors: Array<Scalars['String']['output']>;
    matched: Scalars['Int']['output'];
    scanned: Scalars['Int']['output'];
    skipped: Scalars['Int']['output'];
};

export type DynamicRemoteAccessStatus = {
    __typename?: 'DynamicRemoteAccessStatus';
    /** The type of dynamic remote access that is enabled */
@@ -799,6 +981,20 @@ export type FlashBackupStatus = {
    status: Scalars['String']['output'];
};

export type FlatOrganizerEntry = {
    __typename?: 'FlatOrganizerEntry';
    childrenIds: Array<Scalars['String']['output']>;
    depth: Scalars['Float']['output'];
    hasChildren: Scalars['Boolean']['output'];
    id: Scalars['String']['output'];
    meta?: Maybe<DockerContainer>;
    name: Scalars['String']['output'];
    parentId?: Maybe<Scalars['String']['output']>;
    path: Array<Scalars['String']['output']>;
    position: Scalars['Float']['output'];
    type: Scalars['String']['output'];
};

export type FormSchema = {
    /** The data schema for the form */
    dataSchema: Scalars['JSON']['output'];
@@ -1223,6 +1419,7 @@ export type Mutation = {
    connectSignIn: Scalars['Boolean']['output'];
    connectSignOut: Scalars['Boolean']['output'];
    createDockerFolder: ResolvedOrganizerV1;
    createDockerFolderWithItems: ResolvedOrganizerV1;
    /** Creates a new notification record */
    createNotification: Notification;
    /** Deletes all archived notifications on server. */
@@ -1234,6 +1431,9 @@ export type Mutation = {
    /** Initiates a flash drive backup using a configured remote. */
    initiateFlashBackup: FlashBackupStatus;
    moveDockerEntriesToFolder: ResolvedOrganizerV1;
    moveDockerItemsToPosition: ResolvedOrganizerV1;
    /** Creates a notification if an equivalent unread notification does not already exist. */
    notifyIfUnique?: Maybe<Notification>;
    parityCheck: ParityCheckMutations;
    rclone: RCloneMutations;
    /** Reads each notification to recompute & update the overview. */
@@ -1241,13 +1441,18 @@ export type Mutation = {
    refreshDockerDigests: Scalars['Boolean']['output'];
    /** Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. */
    removePlugin: Scalars['Boolean']['output'];
    renameDockerFolder: ResolvedOrganizerV1;
    /** Reset Docker template mappings to defaults. Use this to recover from corrupted state. */
    resetDockerTemplateMappings: Scalars['Boolean']['output'];
    setDockerFolderChildren: ResolvedOrganizerV1;
    setupRemoteAccess: Scalars['Boolean']['output'];
    syncDockerTemplatePaths: DockerTemplateSyncResult;
    unarchiveAll: NotificationOverview;
    unarchiveNotifications: NotificationOverview;
    /** Marks a notification as unread. */
    unreadNotification: Notification;
    updateApiSettings: ConnectSettingsValues;
    updateDockerViewPreferences: ResolvedOrganizerV1;
    updateSettings: UpdateSettingsResponse;
    vm: VmMutations;
};
@@ -1290,6 +1495,14 @@ export type MutationCreateDockerFolderArgs = {
};


export type MutationCreateDockerFolderWithItemsArgs = {
    name: Scalars['String']['input'];
    parentId?: InputMaybe<Scalars['String']['input']>;
    position?: InputMaybe<Scalars['Float']['input']>;
    sourceEntryIds?: InputMaybe<Array<Scalars['String']['input']>>;
};


export type MutationCreateNotificationArgs = {
    input: NotificationData;
};
@@ -1322,11 +1535,29 @@ export type MutationMoveDockerEntriesToFolderArgs = {
};


export type MutationMoveDockerItemsToPositionArgs = {
    destinationFolderId: Scalars['String']['input'];
    position: Scalars['Float']['input'];
    sourceEntryIds: Array<Scalars['String']['input']>;
};


export type MutationNotifyIfUniqueArgs = {
    input: NotificationData;
};


export type MutationRemovePluginArgs = {
    input: PluginManagementInput;
};


export type MutationRenameDockerFolderArgs = {
    folderId: Scalars['String']['input'];
    newName: Scalars['String']['input'];
};


export type MutationSetDockerFolderChildrenArgs = {
    childrenIds: Array<Scalars['String']['input']>;
    folderId?: InputMaybe<Scalars['String']['input']>;
@@ -1358,6 +1589,12 @@ export type MutationUpdateApiSettingsArgs = {
};


export type MutationUpdateDockerViewPreferencesArgs = {
    prefs: Scalars['JSON']['input'];
    viewId?: InputMaybe<Scalars['String']['input']>;
};


export type MutationUpdateSettingsArgs = {
    input: Scalars['JSON']['input'];
};
@@ -1433,6 +1670,8 @@ export type Notifications = Node & {
    list: Array<Notification>;
    /** A cached overview of the notifications in the system & their severity. */
    overview: NotificationOverview;
    /** Deduplicated list of unread warning and alert notifications, sorted latest first. */
    warningsAndAlerts: Array<Notification>;
};


@@ -1498,22 +1737,6 @@ export type OidcSessionValidation = {
    valid: Scalars['Boolean']['output'];
};

export type OrganizerContainerResource = {
    __typename?: 'OrganizerContainerResource';
    id: Scalars['String']['output'];
    meta?: Maybe<DockerContainer>;
    name: Scalars['String']['output'];
    type: Scalars['String']['output'];
};

export type OrganizerResource = {
    __typename?: 'OrganizerResource';
    id: Scalars['String']['output'];
    meta?: Maybe<Scalars['JSON']['output']>;
    name: Scalars['String']['output'];
    type: Scalars['String']['output'];
};

export type Owner = {
    __typename?: 'Owner';
    avatar: Scalars['String']['output'];
@@ -1882,16 +2105,6 @@ export type RemoveRoleFromApiKeyInput = {
    role: Role;
};

export type ResolvedOrganizerEntry = OrganizerContainerResource | OrganizerResource | ResolvedOrganizerFolder;

export type ResolvedOrganizerFolder = {
    __typename?: 'ResolvedOrganizerFolder';
    children: Array<ResolvedOrganizerEntry>;
    id: Scalars['String']['output'];
    name: Scalars['String']['output'];
    type: Scalars['String']['output'];
};

export type ResolvedOrganizerV1 = {
    __typename?: 'ResolvedOrganizerV1';
    version: Scalars['Float']['output'];
@@ -1900,10 +2113,11 @@ export type ResolvedOrganizerV1 = {

export type ResolvedOrganizerView = {
    __typename?: 'ResolvedOrganizerView';
    flatEntries: Array<FlatOrganizerEntry>;
    id: Scalars['String']['output'];
    name: Scalars['String']['output'];
    prefs?: Maybe<Scalars['JSON']['output']>;
    root: ResolvedOrganizerEntry;
    rootId: Scalars['String']['output'];
};

/** Available resources for permissions */
@@ -2046,9 +2260,11 @@ export type SsoSettings = Node & {
export type Subscription = {
    __typename?: 'Subscription';
    arraySubscription: UnraidArray;
    dockerContainerStats: DockerContainerStats;
    logFile: LogFileContent;
    notificationAdded: Notification;
    notificationsOverview: NotificationOverview;
    notificationsWarningsAndAlerts: Array<Notification>;
    ownerSubscription: Owner;
    parityHistorySubscription: ParityCheck;
    serversSubscription: Server;
@@ -2062,6 +2278,56 @@ export type SubscriptionLogFileArgs = {
    path: Scalars['String']['input'];
};

/** Tailscale exit node connection status */
export type TailscaleExitNodeStatus = {
    __typename?: 'TailscaleExitNodeStatus';
    /** Whether the exit node is online */
    online: Scalars['Boolean']['output'];
    /** Tailscale IPs of the exit node */
    tailscaleIps?: Maybe<Array<Scalars['String']['output']>>;
};

/** Tailscale status for a Docker container */
export type TailscaleStatus = {
    __typename?: 'TailscaleStatus';
    /** Authentication URL if Tailscale needs login */
    authUrl?: Maybe<Scalars['String']['output']>;
    /** Tailscale backend state (Running, NeedsLogin, Stopped, etc.) */
    backendState?: Maybe<Scalars['String']['output']>;
    /** Actual Tailscale DNS name */
    dnsName?: Maybe<Scalars['String']['output']>;
    /** Status of the connected exit node (if using one) */
    exitNodeStatus?: Maybe<TailscaleExitNodeStatus>;
    /** Configured Tailscale hostname */
    hostname?: Maybe<Scalars['String']['output']>;
    /** Whether this container is an exit node */
    isExitNode: Scalars['Boolean']['output'];
    /** Whether the Tailscale key has expired */
    keyExpired: Scalars['Boolean']['output'];
    /** Tailscale key expiry date */
    keyExpiry?: Maybe<Scalars['DateTime']['output']>;
    /** Days until key expires */
    keyExpiryDays?: Maybe<Scalars['Int']['output']>;
    /** Latest available Tailscale version */
    latestVersion?: Maybe<Scalars['String']['output']>;
    /** Whether Tailscale is online in the container */
    online: Scalars['Boolean']['output'];
    /** Advertised subnet routes */
    primaryRoutes?: Maybe<Array<Scalars['String']['output']>>;
    /** DERP relay code */
    relay?: Maybe<Scalars['String']['output']>;
    /** DERP relay region name */
    relayName?: Maybe<Scalars['String']['output']>;
    /** Tailscale IPv4 and IPv6 addresses */
    tailscaleIps?: Maybe<Array<Scalars['String']['output']>>;
    /** Whether a Tailscale update is available */
    updateAvailable: Scalars['Boolean']['output'];
    /** Current Tailscale version */
    version?: Maybe<Scalars['String']['output']>;
    /** Tailscale Serve/Funnel WebUI URL */
    webUiUrl?: Maybe<Scalars['String']['output']>;
};

/** Temperature unit */
export enum Temperature {
    CELSIUS = 'CELSIUS',

@@ -74,13 +74,15 @@ export class InstallPluginCommand extends CommandRunner {
|
||||
|
||||
interface RemovePluginCommandOptions {
|
||||
plugins?: string[];
|
||||
restart: boolean;
|
||||
restart?: boolean;
|
||||
bypassNpm?: boolean;
|
||||
}
|
||||
|
||||
@SubCommand({
|
||||
name: 'remove',
|
||||
aliases: ['rm'],
|
||||
description: 'Remove plugin peer dependencies.',
|
||||
arguments: '[plugins...]',
|
||||
})
|
||||
export class RemovePluginCommand extends CommandRunner {
|
||||
constructor(
|
||||
@@ -93,9 +95,83 @@ export class RemovePluginCommand extends CommandRunner {
|
||||
super();
|
||||
}
|
||||
|
||||
async run(_passedParams: string[], options?: RemovePluginCommandOptions): Promise<void> {
|
||||
async run(passedParams: string[], options?: RemovePluginCommandOptions): Promise<void> {
|
||||
const cliBypass = options?.bypassNpm;
|
||||
const cliRestart = options?.restart;
|
||||
const mergedOptions: RemovePluginCommandOptions = {
|
||||
bypassNpm: cliBypass ?? false,
|
||||
restart: cliRestart ?? true,
|
||||
plugins: passedParams.length > 0 ? passedParams : options?.plugins,
|
||||
};
|
||||
|
||||
let resolvedOptions = mergedOptions;
|
||||
if (!mergedOptions.plugins?.length) {
|
||||
const promptOptions = await this.promptForPlugins(mergedOptions);
|
||||
if (!promptOptions) {
|
||||
return;
|
||||
}
|
||||
resolvedOptions = {
|
||||
// precedence: cli > prompt > default (fallback)
|
||||
bypassNpm: cliBypass ?? promptOptions.bypassNpm ?? mergedOptions.bypassNpm,
|
||||
restart: cliRestart ?? promptOptions.restart ?? mergedOptions.restart,
|
||||
// precedence: prompt > default (fallback)
|
||||
plugins: promptOptions.plugins ?? mergedOptions.plugins,
|
||||
};
|
||||
}
|
||||
|
||||
if (!resolvedOptions.plugins?.length) {
|
||||
this.logService.warn('No plugins selected for removal.');
|
||||
return;
|
||||
}
|
||||
|
||||
if (resolvedOptions.bypassNpm) {
|
||||
await this.pluginManagementService.removePluginConfigOnly(...resolvedOptions.plugins);
|
||||
} else {
|
||||
await this.pluginManagementService.removePlugin(...resolvedOptions.plugins);
|
||||
}
|
||||
for (const plugin of resolvedOptions.plugins) {
|
||||
this.logService.log(`Removed plugin ${plugin}`);
|
||||
}
|
||||
await this.apiConfigPersistence.persist();
|
||||
|
||||
if (resolvedOptions.restart) {
|
||||
await this.restartCommand.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Option({
|
||||
flags: '--no-restart',
|
||||
description: 'do NOT restart the service after deploy',
|
||||
defaultValue: true,
|
||||
})
|
||||
parseRestart(): boolean {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Option({
|
||||
flags: '-b, --bypass-npm',
|
||||
description: 'Bypass npm uninstall and only update the config',
|
||||
defaultValue: false,
|
||||
name: 'bypassNpm',
|
||||
})
|
||||
parseBypass(): boolean {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Option({
|
||||
flags: '--npm',
|
||||
description: 'Run npm uninstall for unbundled plugins (default behavior)',
|
||||
name: 'bypassNpm',
|
||||
})
|
||||
parseRunNpm(): boolean {
|
||||
return false;
|
||||
}
|
||||
|
||||
private async promptForPlugins(
|
||||
initialOptions: RemovePluginCommandOptions
|
||||
): Promise<RemovePluginCommandOptions | undefined> {
|
||||
try {
|
||||
options = await this.inquirerService.prompt(RemovePluginQuestionSet.name, options);
|
||||
return await this.inquirerService.prompt(RemovePluginQuestionSet.name, initialOptions);
|
||||
} catch (error) {
|
||||
if (error instanceof NoPluginsFoundError) {
|
||||
this.logService.error(error.message);
|
||||
@@ -108,30 +184,6 @@ export class RemovePluginCommand extends CommandRunner {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!options.plugins || options.plugins.length === 0) {
|
||||
this.logService.warn('No plugins selected for removal.');
|
||||
return;
|
||||
}
|
||||
|
||||
await this.pluginManagementService.removePlugin(...options.plugins);
|
||||
for (const plugin of options.plugins) {
|
||||
this.logService.log(`Removed plugin ${plugin}`);
|
||||
}
|
||||
await this.apiConfigPersistence.persist();
|
||||
|
||||
if (options.restart) {
|
||||
await this.restartCommand.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Option({
|
||||
flags: '--no-restart',
|
||||
description: 'do NOT restart the service after deploy',
|
||||
defaultValue: true,
|
||||
})
|
||||
parseRestart(): boolean {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import type { ApiConfig } from '@unraid/shared/services/api-config.js';
|
||||
import { ConfigFilePersister } from '@unraid/shared/services/config-file.js';
|
||||
import { csvStringToArray } from '@unraid/shared/util/data.js';
|
||||
|
||||
import { isConnectPluginInstalled } from '@app/connect-plugin-cleanup.js';
|
||||
import { API_VERSION, PATHS_CONFIG_MODULES } from '@app/environment.js';
|
||||
|
||||
export { type ApiConfig };
|
||||
@@ -29,6 +30,13 @@ export const loadApiConfig = async () => {
|
||||
const apiHandler = new ApiConfigPersistence(new ConfigService()).getFileHandler();
|
||||
|
||||
const diskConfig: Partial<ApiConfig> = await apiHandler.loadConfig();
|
||||
// Hack: cleanup stale connect plugin entry if necessary
|
||||
if (!isConnectPluginInstalled()) {
|
||||
diskConfig.plugins = diskConfig.plugins?.filter(
|
||||
(plugin) => plugin !== 'unraid-api-plugin-connect'
|
||||
);
|
||||
await apiHandler.writeConfigFile(diskConfig as ApiConfig);
|
||||
}
|
||||
|
||||
return {
|
||||
...defaultConfig,
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import { Module } from '@nestjs/common';
|
||||
|
||||
import { CustomizationMutationsResolver } from '@app/unraid-api/graph/resolvers/customization/customization.mutations.resolver.js';
|
||||
import { CustomizationResolver } from '@app/unraid-api/graph/resolvers/customization/customization.resolver.js';
|
||||
import { CustomizationService } from '@app/unraid-api/graph/resolvers/customization/customization.service.js';
|
||||
|
||||
@Module({
|
||||
providers: [CustomizationService, CustomizationResolver],
|
||||
providers: [CustomizationService, CustomizationResolver, CustomizationMutationsResolver],
|
||||
exports: [CustomizationService],
|
||||
})
|
||||
export class CustomizationModule {}
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
import { Args, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
|
||||
import { AuthAction, Resource } from '@unraid/shared/graphql.model.js';
|
||||
import { UsePermissions } from '@unraid/shared/use-permissions.directive.js';
|
||||
|
||||
import { CustomizationService } from '@app/unraid-api/graph/resolvers/customization/customization.service.js';
|
||||
import { Theme, ThemeName } from '@app/unraid-api/graph/resolvers/customization/theme.model.js';
|
||||
import { CustomizationMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js';
|
||||
|
||||
@Resolver(() => CustomizationMutations)
|
||||
export class CustomizationMutationsResolver {
|
||||
constructor(private readonly customizationService: CustomizationService) {}
|
||||
|
||||
@ResolveField(() => Theme, { description: 'Update the UI theme (writes dynamix.cfg)' })
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
resource: Resource.CUSTOMIZATIONS,
|
||||
})
|
||||
async setTheme(
|
||||
@Args('theme', { type: () => ThemeName, description: 'Theme to apply' })
|
||||
theme: ThemeName
|
||||
): Promise<Theme> {
|
||||
return this.customizationService.setTheme(theme);
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,7 @@ import { Test, TestingModule } from '@nestjs/testing';
|
||||
import * as fs from 'fs/promises';
|
||||
import * as path from 'path';
|
||||
|
||||
import type { Mock } from 'vitest';
|
||||
import { plainToInstance } from 'class-transformer';
|
||||
import * as ini from 'ini';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
@@ -1182,4 +1183,58 @@ describe('CustomizationService - updateCfgFile', () => {
|
||||
writeError
|
||||
);
|
||||
});
|
||||
|
||||
describe('getTheme', () => {
|
||||
const mockDynamix = getters.dynamix as unknown as Mock;
|
||||
const baseDisplay = {
|
||||
theme: 'white',
|
||||
banner: '',
|
||||
showBannerGradient: 'no',
|
||||
background: '123456',
|
||||
headerdescription: 'yes',
|
||||
headermetacolor: '789abc',
|
||||
header: 'abcdef',
|
||||
};
|
||||
|
||||
const setDisplay = (overrides: Partial<typeof baseDisplay>) => {
|
||||
mockDynamix.mockReturnValue({
|
||||
display: {
|
||||
...baseDisplay,
|
||||
...overrides,
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
it('reports showBannerImage when banner is "image"', async () => {
|
||||
setDisplay({ banner: 'image' });
|
||||
|
||||
const theme = await service.getTheme();
|
||||
|
||||
expect(theme.showBannerImage).toBe(true);
|
||||
});
|
||||
|
||||
it('reports showBannerImage when banner is "yes"', async () => {
|
||||
setDisplay({ banner: 'yes' });
|
||||
|
||||
const theme = await service.getTheme();
|
||||
|
||||
expect(theme.showBannerImage).toBe(true);
|
||||
});
|
||||
|
||||
it('disables showBannerImage when banner is empty', async () => {
|
||||
setDisplay({ banner: '' });
|
||||
|
||||
const theme = await service.getTheme();
|
||||
|
||||
expect(theme.showBannerImage).toBe(false);
|
||||
});
|
||||
|
||||
it('mirrors showBannerGradient flag from display settings', async () => {
|
||||
setDisplay({ banner: 'image', showBannerGradient: 'yes' });
|
||||
expect((await service.getTheme()).showBannerGradient).toBe(true);
|
||||
|
||||
setDisplay({ banner: 'image', showBannerGradient: 'no' });
|
||||
expect((await service.getTheme()).showBannerGradient).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -9,7 +9,9 @@ import * as ini from 'ini';
|
||||
|
||||
import { emcmd } from '@app/core/utils/clients/emcmd.js';
|
||||
import { fileExists } from '@app/core/utils/files/file-exists.js';
|
||||
import { loadDynamixConfigFromDiskSync } from '@app/store/actions/load-dynamix-config-file.js';
|
||||
import { getters, store } from '@app/store/index.js';
|
||||
import { updateDynamixConfig } from '@app/store/modules/dynamix.js';
|
||||
import {
|
||||
ActivationCode,
|
||||
PublicPartnerInfo,
|
||||
@@ -458,7 +460,7 @@ export class CustomizationService implements OnModuleInit {
|
||||
|
||||
return {
|
||||
name,
|
||||
showBannerImage: banner === 'yes',
|
||||
showBannerImage: banner === 'image' || banner === 'yes',
|
||||
showBannerGradient: bannerGradient === 'yes',
|
||||
headerBackgroundColor: this.addHashtoHexField(bgColor),
|
||||
headerPrimaryTextColor: this.addHashtoHexField(textColor),
|
||||
@@ -466,4 +468,16 @@ export class CustomizationService implements OnModuleInit {
|
||||
showHeaderDescription: descriptionShow === 'yes',
|
||||
};
|
||||
}
|
||||
|
||||
public async setTheme(theme: ThemeName): Promise<Theme> {
|
||||
this.logger.log(`Updating theme to ${theme}`);
|
||||
await this.updateCfgFile(this.configFile, 'display', { theme });
|
||||
|
||||
// Refresh in-memory store so subsequent reads get the new theme without a restart
|
||||
const paths = getters.paths();
|
||||
const updatedConfig = loadDynamixConfigFromDiskSync(paths['dynamix-config']);
|
||||
store.dispatch(updateDynamixConfig(updatedConfig));
|
||||
|
||||
return this.getTheme();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/dock
|
||||
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
|
||||
|
||||
@Injectable()
|
||||
export class ContainerStatusJob implements OnApplicationBootstrap {
|
||||
export class ContainerStatusJob {
|
||||
private readonly logger = new Logger(ContainerStatusJob.name);
|
||||
constructor(
|
||||
private readonly dockerManifestService: DockerManifestService,
|
||||
@@ -17,8 +17,10 @@ export class ContainerStatusJob implements OnApplicationBootstrap {
|
||||
|
||||
/**
|
||||
* Initialize cron job for refreshing the update status for all containers on a user-configurable schedule.
|
||||
*
|
||||
* Disabled for now to avoid duplication of the webgui's update notifier job (under Notification Settings).
|
||||
*/
|
||||
onApplicationBootstrap() {
|
||||
_disabled_onApplicationBootstrap() {
|
||||
if (!this.dockerConfigService.enabled()) return;
|
||||
const cronExpression = this.dockerConfigService.getConfig().updateCheckCronSchedule;
|
||||
const cronJob = CronJob.from({
|
||||
|
||||
@@ -0,0 +1,141 @@
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { DockerAutostartService } from '@app/unraid-api/graph/resolvers/docker/docker-autostart.service.js';
|
||||
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
|
||||
// Mock store getters
|
||||
const mockPaths = {
|
||||
'docker-autostart': '/path/to/docker-autostart',
|
||||
'docker-userprefs': '/path/to/docker-userprefs',
|
||||
};
|
||||
|
||||
vi.mock('@app/store/index.js', () => ({
|
||||
getters: {
|
||||
paths: () => mockPaths,
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock fs/promises
|
||||
const { readFileMock, writeFileMock, unlinkMock } = vi.hoisted(() => ({
|
||||
readFileMock: vi.fn().mockResolvedValue(''),
|
||||
writeFileMock: vi.fn().mockResolvedValue(undefined),
|
||||
unlinkMock: vi.fn().mockResolvedValue(undefined),
|
||||
}));
|
||||
|
||||
vi.mock('fs/promises', () => ({
|
||||
readFile: readFileMock,
|
||||
writeFile: writeFileMock,
|
||||
unlink: unlinkMock,
|
||||
}));
|
||||
|
||||
describe('DockerAutostartService', () => {
|
||||
let service: DockerAutostartService;
|
||||
|
||||
beforeEach(async () => {
|
||||
readFileMock.mockReset();
|
||||
writeFileMock.mockReset();
|
||||
unlinkMock.mockReset();
|
||||
readFileMock.mockResolvedValue('');
|
||||
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [DockerAutostartService],
|
||||
}).compile();
|
||||
|
||||
service = module.get<DockerAutostartService>(DockerAutostartService);
|
||||
});
|
||||
|
||||
it('should be defined', () => {
|
||||
expect(service).toBeDefined();
|
||||
});
|
||||
|
||||
it('should parse autostart entries correctly', () => {
|
||||
const content = 'container1 10\ncontainer2\ncontainer3 0';
|
||||
const entries = service.parseAutoStartEntries(content);
|
||||
|
||||
expect(entries).toHaveLength(3);
|
||||
expect(entries[0]).toEqual({ name: 'container1', wait: 10, order: 0 });
|
||||
expect(entries[1]).toEqual({ name: 'container2', wait: 0, order: 1 });
|
||||
expect(entries[2]).toEqual({ name: 'container3', wait: 0, order: 2 });
|
||||
});
|
||||
|
||||
it('should refresh autostart entries', async () => {
|
||||
readFileMock.mockResolvedValue('alpha 5');
|
||||
await service.refreshAutoStartEntries();
|
||||
|
||||
const entry = service.getAutoStartEntry('alpha');
|
||||
expect(entry).toBeDefined();
|
||||
expect(entry?.wait).toBe(5);
|
||||
});
|
||||
|
||||
describe('updateAutostartConfiguration', () => {
|
||||
const mockContainers = [
|
||||
{ id: 'c1', names: ['/alpha'] },
|
||||
{ id: 'c2', names: ['/beta'] },
|
||||
] as DockerContainer[];
|
||||
|
||||
it('should update auto-start configuration and persist waits', async () => {
|
||||
await service.updateAutostartConfiguration(
|
||||
[
|
||||
{ id: 'c1', autoStart: true, wait: 15 },
|
||||
{ id: 'c2', autoStart: true, wait: 0 },
|
||||
],
|
||||
mockContainers,
|
||||
{ persistUserPreferences: true }
|
||||
);
|
||||
|
||||
expect(writeFileMock).toHaveBeenCalledWith(
|
||||
mockPaths['docker-autostart'],
|
||||
'alpha 15\nbeta\n',
|
||||
'utf8'
|
||||
);
|
||||
expect(writeFileMock).toHaveBeenCalledWith(
|
||||
mockPaths['docker-userprefs'],
|
||||
'0="alpha"\n1="beta"\n',
|
||||
'utf8'
|
||||
);
|
||||
});
|
||||
|
||||
it('should skip updating user preferences when persist flag is false', async () => {
|
||||
await service.updateAutostartConfiguration(
|
||||
[{ id: 'c1', autoStart: true, wait: 5 }],
|
||||
mockContainers
|
||||
);
|
||||
|
||||
expect(writeFileMock).toHaveBeenCalledWith(
|
||||
mockPaths['docker-autostart'],
|
||||
'alpha 5\n',
|
||||
'utf8'
|
||||
);
|
||||
expect(writeFileMock).not.toHaveBeenCalledWith(
|
||||
mockPaths['docker-userprefs'],
|
||||
expect.any(String),
|
||||
expect.any(String)
|
||||
);
|
||||
});
|
||||
|
||||
it('should remove auto-start file when no containers are configured', async () => {
|
||||
await service.updateAutostartConfiguration(
|
||||
[{ id: 'c1', autoStart: false, wait: 30 }],
|
||||
mockContainers,
|
||||
{ persistUserPreferences: true }
|
||||
);
|
||||
|
||||
expect(unlinkMock).toHaveBeenCalledWith(mockPaths['docker-autostart']);
|
||||
expect(writeFileMock).toHaveBeenCalledWith(
|
||||
mockPaths['docker-userprefs'],
|
||||
'0="alpha"\n',
|
||||
'utf8'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should sanitize autostart wait values', () => {
|
||||
expect(service.sanitizeAutoStartWait(null)).toBe(0);
|
||||
expect(service.sanitizeAutoStartWait(undefined)).toBe(0);
|
||||
expect(service.sanitizeAutoStartWait(10)).toBe(10);
|
||||
expect(service.sanitizeAutoStartWait(-5)).toBe(0);
|
||||
expect(service.sanitizeAutoStartWait(NaN)).toBe(0);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,175 @@
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import { readFile, unlink, writeFile } from 'fs/promises';
|
||||
|
||||
import Docker from 'dockerode';
|
||||
|
||||
import { getters } from '@app/store/index.js';
|
||||
import {
|
||||
DockerAutostartEntryInput,
|
||||
DockerContainer,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
|
||||
export interface AutoStartEntry {
|
||||
name: string;
|
||||
wait: number;
|
||||
order: number;
|
||||
}
|
||||
|
||||
@Injectable()
|
||||
export class DockerAutostartService {
|
||||
private readonly logger = new Logger(DockerAutostartService.name);
|
||||
private autoStartEntries: AutoStartEntry[] = [];
|
||||
private autoStartEntryByName = new Map<string, AutoStartEntry>();
|
||||
|
||||
public getAutoStartEntry(name: string): AutoStartEntry | undefined {
|
||||
return this.autoStartEntryByName.get(name);
|
||||
}
|
||||
|
||||
public setAutoStartEntries(entries: AutoStartEntry[]) {
|
||||
this.autoStartEntries = entries;
|
||||
this.autoStartEntryByName = new Map(entries.map((entry) => [entry.name, entry]));
|
||||
}
|
||||
|
||||
public parseAutoStartEntries(rawContent: string): AutoStartEntry[] {
|
||||
const lines = rawContent
|
||||
.split('\n')
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0);
|
||||
|
||||
const seen = new Set<string>();
|
||||
const entries: AutoStartEntry[] = [];
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
const [name, waitRaw] = line.split(/\s+/);
|
||||
if (!name || seen.has(name)) {
|
||||
return;
|
||||
}
|
||||
const parsedWait = Number.parseInt(waitRaw ?? '', 10);
|
||||
const wait = Number.isFinite(parsedWait) && parsedWait > 0 ? parsedWait : 0;
|
||||
entries.push({
|
||||
name,
|
||||
wait,
|
||||
order: index,
|
||||
});
|
||||
seen.add(name);
|
||||
});
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
public async refreshAutoStartEntries(): Promise<void> {
|
||||
const autoStartPath = getters.paths()['docker-autostart'];
|
||||
const raw = await readFile(autoStartPath, 'utf8')
|
||||
.then((file) => file.toString())
|
||||
.catch(() => '');
|
||||
const entries = this.parseAutoStartEntries(raw);
|
||||
this.setAutoStartEntries(entries);
|
||||
}
|
||||
|
||||
public sanitizeAutoStartWait(wait?: number | null): number {
|
||||
if (wait === null || wait === undefined) return 0;
|
||||
const coerced = Number.isInteger(wait) ? wait : Number.parseInt(String(wait), 10);
|
||||
if (!Number.isFinite(coerced) || coerced < 0) {
|
||||
return 0;
|
||||
}
|
||||
return coerced;
|
||||
}
|
||||
|
||||
public getContainerPrimaryName(container: Docker.ContainerInfo | DockerContainer): string | null {
|
||||
const names =
|
||||
'Names' in container ? container.Names : 'names' in container ? container.names : undefined;
|
||||
const firstName = names?.[0] ?? '';
|
||||
return firstName ? firstName.replace(/^\//, '') : null;
|
||||
}
|
||||
|
||||
private buildUserPreferenceLines(
|
||||
entries: DockerAutostartEntryInput[],
|
||||
containerById: Map<string, DockerContainer>
|
||||
): string[] {
|
||||
const seenNames = new Set<string>();
|
||||
const lines: string[] = [];
|
||||
|
||||
for (const entry of entries) {
|
||||
const container = containerById.get(entry.id);
|
||||
if (!container) {
|
||||
continue;
|
||||
}
|
||||
const primaryName = this.getContainerPrimaryName(container);
|
||||
if (!primaryName || seenNames.has(primaryName)) {
|
||||
continue;
|
||||
}
|
||||
lines.push(`${lines.length}="${primaryName}"`);
|
||||
seenNames.add(primaryName);
|
||||
}
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
/**
|
||||
* Docker auto start file
|
||||
*
|
||||
* @note Doesn't exist if array is offline.
|
||||
* @see https://github.com/limetech/webgui/issues/502#issue-480992547
|
||||
*/
|
||||
public async getAutoStarts(): Promise<string[]> {
|
||||
await this.refreshAutoStartEntries();
|
||||
return this.autoStartEntries.map((entry) => entry.name);
|
||||
}
|
||||
|
||||
public async updateAutostartConfiguration(
|
||||
entries: DockerAutostartEntryInput[],
|
||||
containers: DockerContainer[],
|
||||
options?: { persistUserPreferences?: boolean }
|
||||
): Promise<void> {
|
||||
const containerById = new Map(containers.map((container) => [container.id, container]));
|
||||
const paths = getters.paths();
|
||||
const autoStartPath = paths['docker-autostart'];
|
||||
const userPrefsPath = paths['docker-userprefs'];
|
||||
const persistUserPreferences = Boolean(options?.persistUserPreferences);
|
||||
|
||||
const lines: string[] = [];
|
||||
const seenNames = new Set<string>();
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.autoStart) {
|
||||
continue;
|
||||
}
|
||||
const container = containerById.get(entry.id);
|
||||
if (!container) {
|
||||
continue;
|
||||
}
|
||||
const primaryName = this.getContainerPrimaryName(container);
|
||||
if (!primaryName || seenNames.has(primaryName)) {
|
||||
continue;
|
||||
}
|
||||
const wait = this.sanitizeAutoStartWait(entry.wait);
|
||||
lines.push(wait > 0 ? `${primaryName} ${wait}` : primaryName);
|
||||
seenNames.add(primaryName);
|
||||
}
|
||||
|
||||
if (lines.length) {
|
||||
await writeFile(autoStartPath, `${lines.join('\n')}\n`, 'utf8');
|
||||
} else {
|
||||
await unlink(autoStartPath)?.catch((error: NodeJS.ErrnoException) => {
|
||||
if (error.code !== 'ENOENT') {
|
||||
throw error;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (persistUserPreferences) {
|
||||
const userPrefsLines = this.buildUserPreferenceLines(entries, containerById);
|
||||
if (userPrefsLines.length) {
|
||||
await writeFile(userPrefsPath, `${userPrefsLines.join('\n')}\n`, 'utf8');
|
||||
} else {
|
||||
await unlink(userPrefsPath)?.catch((error: NodeJS.ErrnoException) => {
|
||||
if (error.code !== 'ENOENT') {
|
||||
throw error;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
await this.refreshAutoStartEntries();
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,22 @@
|
||||
import { Field, ObjectType } from '@nestjs/graphql';
|
||||
|
||||
import { IsArray, IsObject, IsOptional, IsString } from 'class-validator';
|
||||
import { GraphQLJSON } from 'graphql-scalars';
|
||||
|
||||
@ObjectType()
|
||||
export class DockerConfig {
|
||||
@Field(() => String)
|
||||
@IsString()
|
||||
updateCheckCronSchedule!: string;
|
||||
|
||||
@Field(() => GraphQLJSON, { nullable: true })
|
||||
@IsOptional()
|
||||
@IsObject()
|
||||
templateMappings?: Record<string, string | null>;
|
||||
|
||||
@Field(() => [String], { nullable: true })
|
||||
@IsOptional()
|
||||
@IsArray()
|
||||
@IsString({ each: true })
|
||||
skipTemplatePaths?: string[];
|
||||
}
|
||||
|
||||
@@ -31,6 +31,8 @@ export class DockerConfigService extends ConfigFilePersister<DockerConfig> {
|
||||
defaultConfig(): DockerConfig {
|
||||
return {
|
||||
updateCheckCronSchedule: CronExpression.EVERY_DAY_AT_6AM,
|
||||
templateMappings: {},
|
||||
skipTemplatePaths: [],
|
||||
};
|
||||
}
|
||||
|
||||
@@ -40,6 +42,7 @@ export class DockerConfigService extends ConfigFilePersister<DockerConfig> {
|
||||
if (!cronExpression.valid) {
|
||||
throw new AppError(`Cron expression not supported: ${dockerConfig.updateCheckCronSchedule}`);
|
||||
}
|
||||
|
||||
return dockerConfig;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,31 @@
|
||||
import { Logger } from '@nestjs/common';
|
||||
import { Mutation, Parent, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
import { Args, Mutation, Parent, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
|
||||
import { Resource } from '@unraid/shared/graphql.model.js';
|
||||
import { AuthAction, UsePermissions } from '@unraid/shared/use-permissions.directive.js';
|
||||
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { getLanIp } from '@app/core/utils/network.js';
|
||||
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
|
||||
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
|
||||
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { DockerTailscaleService } from '@app/unraid-api/graph/resolvers/docker/docker-tailscale.service.js';
|
||||
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
|
||||
import {
|
||||
ContainerPort,
|
||||
ContainerPortType,
|
||||
ContainerState,
|
||||
DockerContainer,
|
||||
TailscaleStatus,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
|
||||
@Resolver(() => DockerContainer)
|
||||
export class DockerContainerResolver {
|
||||
private readonly logger = new Logger(DockerContainerResolver.name);
|
||||
constructor(private readonly dockerManifestService: DockerManifestService) {}
|
||||
constructor(
|
||||
private readonly dockerManifestService: DockerManifestService,
|
||||
private readonly dockerTemplateScannerService: DockerTemplateScannerService,
|
||||
private readonly dockerTailscaleService: DockerTailscaleService
|
||||
) {}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
@@ -39,6 +52,150 @@ export class DockerContainerResolver {
|
||||
return this.dockerManifestService.isRebuildReady(container.hostConfig?.networkMode);
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, { nullable: true })
|
||||
public async projectUrl(@Parent() container: DockerContainer) {
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
return details?.project || null;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, { nullable: true })
|
||||
public async registryUrl(@Parent() container: DockerContainer) {
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
return details?.registry || null;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, { nullable: true })
|
||||
public async supportUrl(@Parent() container: DockerContainer) {
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
return details?.support || null;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, { nullable: true })
|
||||
public async iconUrl(@Parent() container: DockerContainer) {
|
||||
if (container.labels?.['net.unraid.docker.icon']) {
|
||||
return container.labels['net.unraid.docker.icon'];
|
||||
}
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
return details?.icon || null;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, { nullable: true, description: 'Shell to use for console access' })
|
||||
public async shell(@Parent() container: DockerContainer): Promise<string | null> {
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
return details?.shell || null;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => [ContainerPort], {
|
||||
nullable: true,
|
||||
description: 'Port mappings from template (used when container is not running)',
|
||||
})
|
||||
public async templatePorts(@Parent() container: DockerContainer): Promise<ContainerPort[] | null> {
|
||||
if (!container.templatePath) return null;
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
if (!details?.ports?.length) return null;
|
||||
|
||||
return details.ports.map((port) => ({
|
||||
privatePort: port.privatePort,
|
||||
publicPort: port.publicPort,
|
||||
type: port.type.toUpperCase() === 'UDP' ? ContainerPortType.UDP : ContainerPortType.TCP,
|
||||
}));
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => String, {
|
||||
nullable: true,
|
||||
description: 'Resolved WebUI URL from template',
|
||||
})
|
||||
public async webUiUrl(@Parent() container: DockerContainer): Promise<string | null> {
|
||||
if (!container.templatePath) return null;
|
||||
|
||||
const details = await this.dockerTemplateScannerService.getTemplateDetails(
|
||||
container.templatePath
|
||||
);
|
||||
|
||||
if (!details?.webUi) return null;
|
||||
|
||||
const lanIp = getLanIp();
|
||||
if (!lanIp) return null;
|
||||
|
||||
let resolvedUrl = details.webUi;
|
||||
|
||||
// Replace [IP] placeholder with LAN IP
|
||||
resolvedUrl = resolvedUrl.replace(/\[IP\]/g, lanIp);
|
||||
|
||||
// Replace [PORT:XXXX] placeholder
|
||||
const portMatch = resolvedUrl.match(/\[PORT:(\d+)\]/);
|
||||
if (portMatch) {
|
||||
const templatePort = parseInt(portMatch[1], 10);
|
||||
let resolvedPort = templatePort;
|
||||
|
||||
// Check if this port is mapped to a public port
|
||||
if (container.ports) {
|
||||
for (const port of container.ports) {
|
||||
if (port.privatePort === templatePort && port.publicPort) {
|
||||
resolvedPort = port.publicPort;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resolvedUrl = resolvedUrl.replace(/\[PORT:\d+\]/g, String(resolvedPort));
|
||||
}
|
||||
|
||||
return resolvedUrl;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
@@ -48,4 +205,65 @@ export class DockerContainerResolver {
|
||||
public async refreshDockerDigests() {
|
||||
return this.dockerManifestService.refreshDigests();
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => Boolean, { description: 'Whether Tailscale is enabled for this container' })
|
||||
public tailscaleEnabled(@Parent() container: DockerContainer): boolean {
|
||||
// Check for Tailscale hostname label (set when hostname is explicitly configured)
|
||||
if (container.labels?.['net.unraid.docker.tailscale.hostname']) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for Tailscale hook mount - look for the source path which is an Unraid system path
|
||||
// The hook is mounted from /usr/local/share/docker/tailscale_container_hook
|
||||
const mounts = container.mounts ?? [];
|
||||
return mounts.some((mount: Record<string, unknown>) => {
|
||||
const source = (mount?.Source ?? mount?.source) as string | undefined;
|
||||
return source?.includes('tailscale_container_hook');
|
||||
});
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => TailscaleStatus, {
|
||||
nullable: true,
|
||||
description: 'Tailscale status for this container (fetched via docker exec)',
|
||||
})
|
||||
public async tailscaleStatus(
|
||||
@Parent() container: DockerContainer,
|
||||
@Args('forceRefresh', { type: () => Boolean, nullable: true, defaultValue: false })
|
||||
forceRefresh: boolean
|
||||
): Promise<TailscaleStatus | null> {
|
||||
// First check if Tailscale is enabled
|
||||
if (!this.tailscaleEnabled(container)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const labels = container.labels ?? {};
|
||||
const hostname = labels['net.unraid.docker.tailscale.hostname'];
|
||||
|
||||
if (container.state !== ContainerState.RUNNING) {
|
||||
return {
|
||||
online: false,
|
||||
hostname: hostname || undefined,
|
||||
isExitNode: false,
|
||||
updateAvailable: false,
|
||||
keyExpired: false,
|
||||
};
|
||||
}
|
||||
|
||||
const containerName = container.names[0];
|
||||
if (!containerName) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return this.dockerTailscaleService.getTailscaleStatus(containerName, labels, forceRefresh);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
import { Logger } from '@nestjs/common';
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
import { PassThrough, Readable } from 'stream';
|
||||
import { PassThrough } from 'stream';
|
||||
|
||||
import Docker from 'dockerode';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
// Import pubsub for use in tests
|
||||
@@ -51,6 +49,14 @@ vi.mock('@app/core/pubsub.js', () => ({
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock the docker client utility - this is what the service actually uses
|
||||
const mockDockerClientInstance = {
|
||||
getEvents: vi.fn(),
|
||||
};
|
||||
vi.mock('./utils/docker-client.js', () => ({
|
||||
getDockerClient: vi.fn(() => mockDockerClientInstance),
|
||||
}));
|
||||
|
||||
// Mock DockerService
|
||||
vi.mock('./docker.service.js', () => ({
|
||||
DockerService: vi.fn().mockImplementation(() => ({
|
||||
@@ -63,20 +69,13 @@ vi.mock('./docker.service.js', () => ({
|
||||
describe('DockerEventService', () => {
|
||||
let service: DockerEventService;
|
||||
let dockerService: DockerService;
|
||||
let mockDockerClient: Docker;
|
||||
let mockEventStream: PassThrough;
|
||||
let mockLogger: Logger;
|
||||
let module: TestingModule;
|
||||
|
||||
beforeEach(async () => {
|
||||
// Create a mock Docker client
|
||||
mockDockerClient = {
|
||||
getEvents: vi.fn(),
|
||||
} as unknown as Docker;
|
||||
|
||||
// Create a mock Docker service *instance*
|
||||
const mockDockerServiceImpl = {
|
||||
getDockerClient: vi.fn().mockReturnValue(mockDockerClient),
|
||||
getDockerClient: vi.fn(),
|
||||
clearContainerCache: vi.fn(),
|
||||
getAppInfo: vi.fn().mockResolvedValue({ info: { apps: { installed: 1, running: 1 } } }),
|
||||
};
|
||||
@@ -85,12 +84,7 @@ describe('DockerEventService', () => {
|
||||
mockEventStream = new PassThrough();
|
||||
|
||||
// Set up the mock Docker client to return our mock event stream
|
||||
vi.spyOn(mockDockerClient, 'getEvents').mockResolvedValue(
|
||||
mockEventStream as unknown as Readable
|
||||
);
|
||||
|
||||
// Create a mock logger
|
||||
mockLogger = new Logger(DockerEventService.name) as Logger;
|
||||
mockDockerClientInstance.getEvents = vi.fn().mockResolvedValue(mockEventStream);
|
||||
|
||||
// Use the mock implementation in the testing module
|
||||
module = await Test.createTestingModule({
|
||||
|
||||
@@ -7,6 +7,7 @@ import Docker from 'dockerode';
|
||||
import { pubsub, PUBSUB_CHANNEL } from '@app/core/pubsub.js';
|
||||
import { getters } from '@app/store/index.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { getDockerClient } from '@app/unraid-api/graph/resolvers/docker/utils/docker-client.js';
|
||||
|
||||
enum DockerEventAction {
|
||||
DIE = 'die',
|
||||
@@ -66,7 +67,7 @@ export class DockerEventService implements OnModuleDestroy, OnModuleInit {
|
||||
];
|
||||
|
||||
constructor(private readonly dockerService: DockerService) {
|
||||
this.client = this.dockerService.getDockerClient();
|
||||
this.client = getDockerClient();
|
||||
}
|
||||
|
||||
async onModuleInit() {
|
||||
|
||||
@@ -0,0 +1,143 @@
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { DockerLogService } from '@app/unraid-api/graph/resolvers/docker/docker-log.service.js';
|
||||
|
||||
// Mock dependencies
|
||||
const mockExeca = vi.fn();
|
||||
vi.mock('execa', () => ({
|
||||
execa: (cmd: string, args: string[]) => mockExeca(cmd, args),
|
||||
}));
|
||||
|
||||
const { mockDockerInstance, mockGetContainer, mockContainer } = vi.hoisted(() => {
|
||||
const mockContainer = {
|
||||
inspect: vi.fn(),
|
||||
};
|
||||
const mockGetContainer = vi.fn().mockReturnValue(mockContainer);
|
||||
const mockDockerInstance = {
|
||||
getContainer: mockGetContainer,
|
||||
};
|
||||
return { mockDockerInstance, mockGetContainer, mockContainer };
|
||||
});
|
||||
|
||||
vi.mock('@app/unraid-api/graph/resolvers/docker/utils/docker-client.js', () => ({
|
||||
getDockerClient: vi.fn().mockReturnValue(mockDockerInstance),
|
||||
}));
|
||||
|
||||
const { statMock } = vi.hoisted(() => ({
|
||||
statMock: vi.fn().mockResolvedValue({ size: 0 }),
|
||||
}));
|
||||
|
||||
vi.mock('fs/promises', () => ({
|
||||
stat: statMock,
|
||||
}));
|
||||
|
||||
describe('DockerLogService', () => {
|
||||
let service: DockerLogService;
|
||||
|
||||
beforeEach(async () => {
|
||||
mockExeca.mockReset();
|
||||
mockGetContainer.mockReset();
|
||||
mockGetContainer.mockReturnValue(mockContainer);
|
||||
mockContainer.inspect.mockReset();
|
||||
statMock.mockReset();
|
||||
statMock.mockResolvedValue({ size: 0 });
|
||||
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [DockerLogService],
|
||||
}).compile();
|
||||
|
||||
service = module.get<DockerLogService>(DockerLogService);
|
||||
});
|
||||
|
||||
it('should be defined', () => {
|
||||
expect(service).toBeDefined();
|
||||
});
|
||||
|
||||
describe('getContainerLogSizes', () => {
|
||||
it('should get container log sizes using dockerode inspect', async () => {
|
||||
mockContainer.inspect.mockResolvedValue({
|
||||
LogPath: '/var/lib/docker/containers/id/id-json.log',
|
||||
});
|
||||
statMock.mockResolvedValue({ size: 1024 });
|
||||
|
||||
const sizes = await service.getContainerLogSizes(['test-container']);
|
||||
|
||||
expect(mockGetContainer).toHaveBeenCalledWith('test-container');
|
||||
expect(mockContainer.inspect).toHaveBeenCalled();
|
||||
expect(statMock).toHaveBeenCalledWith('/var/lib/docker/containers/id/id-json.log');
|
||||
expect(sizes.get('test-container')).toBe(1024);
|
||||
});
|
||||
|
||||
it('should return 0 for missing log path', async () => {
|
||||
mockContainer.inspect.mockResolvedValue({}); // No LogPath
|
||||
|
||||
const sizes = await service.getContainerLogSizes(['test-container']);
|
||||
expect(sizes.get('test-container')).toBe(0);
|
||||
});
|
||||
|
||||
it('should handle inspect errors gracefully', async () => {
|
||||
mockContainer.inspect.mockRejectedValue(new Error('Inspect failed'));
|
||||
|
||||
const sizes = await service.getContainerLogSizes(['test-container']);
|
||||
expect(sizes.get('test-container')).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getContainerLogs', () => {
|
||||
it('should fetch logs via docker CLI', async () => {
|
||||
mockExeca.mockResolvedValue({ stdout: '2023-01-01T00:00:00Z Log message\n' });
|
||||
|
||||
const result = await service.getContainerLogs('test-id');
|
||||
|
||||
expect(mockExeca).toHaveBeenCalledWith('docker', [
|
||||
'logs',
|
||||
'--timestamps',
|
||||
'--tail',
|
||||
'200',
|
||||
'test-id',
|
||||
]);
|
||||
expect(result.lines).toHaveLength(1);
|
||||
expect(result.lines[0].message).toBe('Log message');
|
||||
});
|
||||
|
||||
it('should respect tail option', async () => {
|
||||
mockExeca.mockResolvedValue({ stdout: '' });
|
||||
|
||||
await service.getContainerLogs('test-id', { tail: 50 });
|
||||
|
||||
expect(mockExeca).toHaveBeenCalledWith('docker', [
|
||||
'logs',
|
||||
'--timestamps',
|
||||
'--tail',
|
||||
'50',
|
||||
'test-id',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should respect since option', async () => {
|
||||
mockExeca.mockResolvedValue({ stdout: '' });
|
||||
const since = new Date('2023-01-01T00:00:00Z');
|
||||
|
||||
await service.getContainerLogs('test-id', { since });
|
||||
|
||||
expect(mockExeca).toHaveBeenCalledWith('docker', [
|
||||
'logs',
|
||||
'--timestamps',
|
||||
'--tail',
|
||||
'200',
|
||||
'--since',
|
||||
since.toISOString(),
|
||||
'test-id',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should throw AppError on execa failure', async () => {
|
||||
mockExeca.mockRejectedValue(new Error('Docker error'));
|
||||
|
||||
await expect(service.getContainerLogs('test-id')).rejects.toThrow(AppError);
|
||||
});
|
||||
});
|
||||
});
|
||||
149
api/src/unraid-api/graph/resolvers/docker/docker-log.service.ts
Normal file
149
api/src/unraid-api/graph/resolvers/docker/docker-log.service.ts
Normal file
@@ -0,0 +1,149 @@
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import { stat } from 'fs/promises';
|
||||
|
||||
import type { ExecaError } from 'execa';
|
||||
import { execa } from 'execa';
|
||||
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import {
|
||||
DockerContainerLogLine,
|
||||
DockerContainerLogs,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { getDockerClient } from '@app/unraid-api/graph/resolvers/docker/utils/docker-client.js';
|
||||
|
||||
@Injectable()
|
||||
export class DockerLogService {
|
||||
private readonly logger = new Logger(DockerLogService.name);
|
||||
private readonly client = getDockerClient();
|
||||
|
||||
private static readonly DEFAULT_LOG_TAIL = 200;
|
||||
private static readonly MAX_LOG_TAIL = 2000;
|
||||
|
||||
public async getContainerLogSizes(containerNames: string[]): Promise<Map<string, number>> {
|
||||
const logSizes = new Map<string, number>();
|
||||
if (!Array.isArray(containerNames) || containerNames.length === 0) {
|
||||
return logSizes;
|
||||
}
|
||||
|
||||
for (const rawName of containerNames) {
|
||||
const normalized = (rawName ?? '').replace(/^\//, '');
|
||||
if (!normalized) {
|
||||
logSizes.set(normalized, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
const container = this.client.getContainer(normalized);
|
||||
const info = await container.inspect();
|
||||
const logPath = info.LogPath;
|
||||
|
||||
if (!logPath || typeof logPath !== 'string' || !logPath.length) {
|
||||
logSizes.set(normalized, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
const stats = await stat(logPath).catch(() => null);
|
||||
logSizes.set(normalized, stats?.size ?? 0);
|
||||
} catch (error) {
|
||||
const message =
|
||||
error instanceof Error ? error.message : String(error ?? 'unknown error');
|
||||
this.logger.debug(
|
||||
`Failed to determine log size for container ${normalized}: ${message}`
|
||||
);
|
||||
logSizes.set(normalized, 0);
|
||||
}
|
||||
}
|
||||
|
||||
return logSizes;
|
||||
}
|
||||
|
||||
public async getContainerLogs(
|
||||
id: string,
|
||||
options?: { since?: Date | null; tail?: number | null }
|
||||
): Promise<DockerContainerLogs> {
|
||||
const normalizedId = (id ?? '').trim();
|
||||
if (!normalizedId) {
|
||||
throw new AppError('Container id is required to fetch logs.', 400);
|
||||
}
|
||||
|
||||
const tail = this.normalizeLogTail(options?.tail);
|
||||
const args = ['logs', '--timestamps', '--tail', String(tail)];
|
||||
const sinceIso = options?.since instanceof Date ? options.since.toISOString() : null;
|
||||
if (sinceIso) {
|
||||
args.push('--since', sinceIso);
|
||||
}
|
||||
args.push(normalizedId);
|
||||
|
||||
try {
|
||||
const { stdout } = await execa('docker', args);
|
||||
const lines = this.parseDockerLogOutput(stdout);
|
||||
const cursor =
|
||||
lines.length > 0 ? lines[lines.length - 1].timestamp : (options?.since ?? null);
|
||||
|
||||
return {
|
||||
containerId: normalizedId,
|
||||
lines,
|
||||
cursor: cursor ?? undefined,
|
||||
};
|
||||
} catch (error: unknown) {
|
||||
const execaError = error as ExecaError;
|
||||
const stderr = typeof execaError?.stderr === 'string' ? execaError.stderr.trim() : '';
|
||||
const message = stderr || execaError?.message || 'Unknown error';
|
||||
this.logger.error(
|
||||
`Failed to fetch logs for container ${normalizedId}: ${message}`,
|
||||
execaError
|
||||
);
|
||||
throw new AppError(`Failed to fetch logs for container ${normalizedId}.`);
|
||||
}
|
||||
}
|
||||
|
||||
private normalizeLogTail(tail?: number | null): number {
|
||||
if (typeof tail !== 'number' || Number.isNaN(tail)) {
|
||||
return DockerLogService.DEFAULT_LOG_TAIL;
|
||||
}
|
||||
const coerced = Math.floor(tail);
|
||||
if (!Number.isFinite(coerced) || coerced <= 0) {
|
||||
return DockerLogService.DEFAULT_LOG_TAIL;
|
||||
}
|
||||
return Math.min(coerced, DockerLogService.MAX_LOG_TAIL);
|
||||
}
|
||||
|
||||
private parseDockerLogOutput(output: string): DockerContainerLogLine[] {
|
||||
if (!output) {
|
||||
return [];
|
||||
}
|
||||
return output
|
||||
.split(/\r?\n/g)
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0)
|
||||
.map((line) => this.parseDockerLogLine(line))
|
||||
.filter((entry): entry is DockerContainerLogLine => Boolean(entry));
|
||||
}
|
||||
|
||||
private parseDockerLogLine(line: string): DockerContainerLogLine | null {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed.length) {
|
||||
return null;
|
||||
}
|
||||
const firstSpaceIndex = trimmed.indexOf(' ');
|
||||
if (firstSpaceIndex === -1) {
|
||||
return {
|
||||
timestamp: new Date(),
|
||||
message: trimmed,
|
||||
};
|
||||
}
|
||||
const potentialTimestamp = trimmed.slice(0, firstSpaceIndex);
|
||||
const message = trimmed.slice(firstSpaceIndex + 1);
|
||||
const parsedTimestamp = new Date(potentialTimestamp);
|
||||
if (Number.isNaN(parsedTimestamp.getTime())) {
|
||||
return {
|
||||
timestamp: new Date(),
|
||||
message: trimmed,
|
||||
};
|
||||
}
|
||||
return {
|
||||
timestamp: parsedTimestamp,
|
||||
message,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,14 @@ export class DockerManifestService {
|
||||
return this.dockerPhpService.refreshDigestsViaPhp();
|
||||
});
|
||||
|
||||
/**
|
||||
* Reads the cached update status file and returns the parsed contents.
|
||||
* Exposed so other services can reuse the parsed data when evaluating many containers.
|
||||
*/
|
||||
async getCachedUpdateStatuses(): Promise<Record<string, CachedStatusEntry>> {
|
||||
return this.dockerPhpService.readCachedUpdateStatus();
|
||||
}
|
||||
|
||||
/**
|
||||
* Recomputes local/remote docker container digests and writes them to /var/lib/docker/unraid-update-status.json
|
||||
* @param mutex - Optional mutex to use for the operation. If not provided, a default mutex will be used.
|
||||
@@ -41,7 +49,22 @@ export class DockerManifestService {
|
||||
cacheData ??= await this.dockerPhpService.readCachedUpdateStatus();
|
||||
const containerData = cacheData[taggedRef];
|
||||
if (!containerData) return null;
|
||||
return containerData.status?.toLowerCase() === 'true';
|
||||
|
||||
const normalize = (digest?: string | null) => {
|
||||
const value = digest?.trim().toLowerCase();
|
||||
return value && value !== 'undef' ? value : null;
|
||||
};
|
||||
|
||||
const localDigest = normalize(containerData.local);
|
||||
const remoteDigest = normalize(containerData.remote);
|
||||
if (localDigest && remoteDigest) {
|
||||
return localDigest !== remoteDigest;
|
||||
}
|
||||
|
||||
const status = containerData.status?.toLowerCase();
|
||||
if (status === 'true') return true;
|
||||
if (status === 'false') return false;
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
import { CACHE_MANAGER } from '@nestjs/cache-manager';
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { DockerNetworkService } from '@app/unraid-api/graph/resolvers/docker/docker-network.service.js';
|
||||
|
||||
const { mockDockerInstance, mockListNetworks } = vi.hoisted(() => {
|
||||
const mockListNetworks = vi.fn();
|
||||
const mockDockerInstance = {
|
||||
listNetworks: mockListNetworks,
|
||||
};
|
||||
return { mockDockerInstance, mockListNetworks };
|
||||
});
|
||||
|
||||
vi.mock('@app/unraid-api/graph/resolvers/docker/utils/docker-client.js', () => ({
|
||||
getDockerClient: vi.fn().mockReturnValue(mockDockerInstance),
|
||||
}));
|
||||
|
||||
const mockCacheManager = {
|
||||
get: vi.fn(),
|
||||
set: vi.fn(),
|
||||
};
|
||||
|
||||
describe('DockerNetworkService', () => {
|
||||
let service: DockerNetworkService;
|
||||
|
||||
beforeEach(async () => {
|
||||
mockListNetworks.mockReset();
|
||||
mockCacheManager.get.mockReset();
|
||||
mockCacheManager.set.mockReset();
|
||||
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
DockerNetworkService,
|
||||
{
|
||||
provide: CACHE_MANAGER,
|
||||
useValue: mockCacheManager,
|
||||
},
|
||||
],
|
||||
}).compile();
|
||||
|
||||
service = module.get<DockerNetworkService>(DockerNetworkService);
|
||||
});
|
||||
|
||||
it('should be defined', () => {
|
||||
expect(service).toBeDefined();
|
||||
});
|
||||
|
||||
describe('getNetworks', () => {
|
||||
it('should return cached networks if available and not skipped', async () => {
|
||||
const cached = [{ id: 'net1', name: 'test-net' }];
|
||||
mockCacheManager.get.mockResolvedValue(cached);
|
||||
|
||||
const result = await service.getNetworks({ skipCache: false });
|
||||
expect(result).toEqual(cached);
|
||||
expect(mockListNetworks).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should fetch networks from docker if cache skipped', async () => {
|
||||
const rawNetworks = [
|
||||
{
|
||||
Id: 'net1',
|
||||
Name: 'test-net',
|
||||
Driver: 'bridge',
|
||||
},
|
||||
];
|
||||
mockListNetworks.mockResolvedValue(rawNetworks);
|
||||
|
||||
const result = await service.getNetworks({ skipCache: true });
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe('net1');
|
||||
expect(mockListNetworks).toHaveBeenCalled();
|
||||
expect(mockCacheManager.set).toHaveBeenCalledWith(
|
||||
DockerNetworkService.NETWORK_CACHE_KEY,
|
||||
expect.anything(),
|
||||
expect.anything()
|
||||
);
|
||||
});
|
||||
|
||||
it('should fetch networks from docker if cache miss', async () => {
|
||||
mockCacheManager.get.mockResolvedValue(undefined);
|
||||
mockListNetworks.mockResolvedValue([]);
|
||||
|
||||
await service.getNetworks({ skipCache: false });
|
||||
expect(mockListNetworks).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,69 @@
import { CACHE_MANAGER } from '@nestjs/cache-manager';
import { Inject, Injectable, Logger } from '@nestjs/common';

import { type Cache } from 'cache-manager';

import { catchHandlers } from '@app/core/utils/misc/catch-handlers.js';
import { DockerNetwork } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { getDockerClient } from '@app/unraid-api/graph/resolvers/docker/utils/docker-client.js';

interface NetworkListingOptions {
    skipCache: boolean;
}

@Injectable()
export class DockerNetworkService {
    private readonly logger = new Logger(DockerNetworkService.name);
    private readonly client = getDockerClient();

    public static readonly NETWORK_CACHE_KEY = 'docker_networks';
    private static readonly CACHE_TTL_SECONDS = 60;

    constructor(@Inject(CACHE_MANAGER) private cacheManager: Cache) {}

    /**
     * Get all Docker networks
     * @returns All the in/active Docker networks on the system.
     */
    public async getNetworks({ skipCache }: NetworkListingOptions): Promise<DockerNetwork[]> {
        if (!skipCache) {
            const cachedNetworks = await this.cacheManager.get<DockerNetwork[]>(
                DockerNetworkService.NETWORK_CACHE_KEY
            );
            if (cachedNetworks) {
                this.logger.debug('Using docker network cache');
                return cachedNetworks;
            }
        }

        this.logger.debug('Updating docker network cache');
        const rawNetworks = await this.client.listNetworks().catch(catchHandlers.docker);
        const networks = rawNetworks.map(
            (network) =>
                ({
                    name: network.Name || '',
                    id: network.Id || '',
                    created: network.Created || '',
                    scope: network.Scope || '',
                    driver: network.Driver || '',
                    enableIPv6: network.EnableIPv6 || false,
                    ipam: network.IPAM || {},
                    internal: network.Internal || false,
                    attachable: network.Attachable || false,
                    ingress: network.Ingress || false,
                    configFrom: network.ConfigFrom || {},
                    configOnly: network.ConfigOnly || false,
                    containers: network.Containers || {},
                    options: network.Options || {},
                    labels: network.Labels || {},
                }) as DockerNetwork
        );

        await this.cacheManager.set(
            DockerNetworkService.NETWORK_CACHE_KEY,
            networks,
            DockerNetworkService.CACHE_TTL_SECONDS * 1000
        );
        return networks;
    }
}
@@ -0,0 +1,84 @@
import { Test, TestingModule } from '@nestjs/testing';

import { beforeEach, describe, expect, it, vi } from 'vitest';

import { DockerPortService } from '@app/unraid-api/graph/resolvers/docker/docker-port.service.js';
import {
    ContainerPortType,
    DockerContainer,
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';

vi.mock('@app/core/utils/network.js', () => ({
    getLanIp: vi.fn().mockReturnValue('192.168.1.100'),
}));

describe('DockerPortService', () => {
    let service: DockerPortService;

    beforeEach(async () => {
        const module: TestingModule = await Test.createTestingModule({
            providers: [DockerPortService],
        }).compile();

        service = module.get<DockerPortService>(DockerPortService);
    });

    it('should be defined', () => {
        expect(service).toBeDefined();
    });

    describe('deduplicateContainerPorts', () => {
        it('should deduplicate ports', () => {
            const ports = [
                { PrivatePort: 80, PublicPort: 80, Type: 'tcp' },
                { PrivatePort: 80, PublicPort: 80, Type: 'tcp' },
                { PrivatePort: 443, PublicPort: 443, Type: 'tcp' },
            ];
            // @ts-expect-error - types are loosely mocked
            const result = service.deduplicateContainerPorts(ports);
            expect(result).toHaveLength(2);
        });
    });

    describe('calculateConflicts', () => {
        it('should detect port conflicts', () => {
            const containers = [
                {
                    id: 'c1',
                    names: ['/web1'],
                    ports: [{ privatePort: 80, type: ContainerPortType.TCP }],
                },
                {
                    id: 'c2',
                    names: ['/web2'],
                    ports: [{ privatePort: 80, type: ContainerPortType.TCP }],
                },
            ] as DockerContainer[];

            const result = service.calculateConflicts(containers);
            expect(result.containerPorts).toHaveLength(1);
            expect(result.containerPorts[0].privatePort).toBe(80);
            expect(result.containerPorts[0].containers).toHaveLength(2);
        });

        it('should detect lan port conflicts', () => {
            const containers = [
                {
                    id: 'c1',
                    names: ['/web1'],
                    ports: [{ publicPort: 8080, type: ContainerPortType.TCP }],
                },
                {
                    id: 'c2',
                    names: ['/web2'],
                    ports: [{ publicPort: 8080, type: ContainerPortType.TCP }],
                },
            ] as DockerContainer[];

            const result = service.calculateConflicts(containers);
            expect(result.lanPorts).toHaveLength(1);
            expect(result.lanPorts[0].publicPort).toBe(8080);
            expect(result.lanPorts[0].containers).toHaveLength(2);
        });
    });
});
178
api/src/unraid-api/graph/resolvers/docker/docker-port.service.ts
Normal file
@@ -0,0 +1,178 @@
import { Injectable } from '@nestjs/common';

import Docker from 'dockerode';

import { getLanIp } from '@app/core/utils/network.js';
import {
    ContainerPortType,
    DockerContainer,
    DockerContainerPortConflict,
    DockerLanPortConflict,
    DockerPortConflictContainer,
    DockerPortConflicts,
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';

@Injectable()
export class DockerPortService {
    public deduplicateContainerPorts(
        ports: Docker.ContainerInfo['Ports'] | undefined
    ): Docker.ContainerInfo['Ports'] {
        if (!Array.isArray(ports)) {
            return [];
        }

        const seen = new Set<string>();
        const uniquePorts: Docker.ContainerInfo['Ports'] = [];

        for (const port of ports) {
            const key = `${port.PrivatePort ?? ''}-${port.PublicPort ?? ''}-${(port.Type ?? '').toLowerCase()}`;
            if (seen.has(key)) {
                continue;
            }
            seen.add(key);
            uniquePorts.push(port);
        }

        return uniquePorts;
    }

    public calculateConflicts(containers: DockerContainer[]): DockerPortConflicts {
        return {
            containerPorts: this.buildContainerPortConflicts(containers),
            lanPorts: this.buildLanPortConflicts(containers),
        };
    }

    private buildPortConflictContainerRef(container: DockerContainer): DockerPortConflictContainer {
        const primaryName = this.getContainerPrimaryName(container);
        const fallback = container.names?.[0] ?? container.id;
        const normalized = typeof fallback === 'string' ? fallback.replace(/^\//, '') : container.id;
        return {
            id: container.id,
            name: primaryName || normalized,
        };
    }

    private getContainerPrimaryName(container: DockerContainer): string | null {
        const names = container.names;
        const firstName = names?.[0] ?? '';
        return firstName ? firstName.replace(/^\//, '') : null;
    }

    private buildContainerPortConflicts(containers: DockerContainer[]): DockerContainerPortConflict[] {
        const groups = new Map<
            string,
            {
                privatePort: number;
                type: ContainerPortType;
                containers: DockerContainer[];
                seen: Set<string>;
            }
        >();

        for (const container of containers) {
            if (!Array.isArray(container.ports)) {
                continue;
            }
            for (const port of container.ports) {
                if (!port || typeof port.privatePort !== 'number') {
                    continue;
                }
                const type = port.type ?? ContainerPortType.TCP;
                const key = `${port.privatePort}/${type}`;
                let group = groups.get(key);
                if (!group) {
                    group = {
                        privatePort: port.privatePort,
                        type,
                        containers: [],
                        seen: new Set<string>(),
                    };
                    groups.set(key, group);
                }
                if (group.seen.has(container.id)) {
                    continue;
                }
                group.seen.add(container.id);
                group.containers.push(container);
            }
        }

        return Array.from(groups.values())
            .filter((group) => group.containers.length > 1)
            .map((group) => ({
                privatePort: group.privatePort,
                type: group.type,
                containers: group.containers.map((container) =>
                    this.buildPortConflictContainerRef(container)
                ),
            }))
            .sort((a, b) => {
                if (a.privatePort !== b.privatePort) {
                    return a.privatePort - b.privatePort;
                }
                return a.type.localeCompare(b.type);
            });
    }

    private buildLanPortConflicts(containers: DockerContainer[]): DockerLanPortConflict[] {
        const lanIp = getLanIp();
        const groups = new Map<
            string,
            {
                lanIpPort: string;
                publicPort: number;
                type: ContainerPortType;
                containers: DockerContainer[];
                seen: Set<string>;
            }
        >();

        for (const container of containers) {
            if (!Array.isArray(container.ports)) {
                continue;
            }
            for (const port of container.ports) {
                if (!port || typeof port.publicPort !== 'number') {
                    continue;
                }
                const type = port.type ?? ContainerPortType.TCP;
                const lanIpPort = lanIp ? `${lanIp}:${port.publicPort}` : `${port.publicPort}`;
                const key = `${lanIpPort}/${type}`;
                let group = groups.get(key);
                if (!group) {
                    group = {
                        lanIpPort,
                        publicPort: port.publicPort,
                        type,
                        containers: [],
                        seen: new Set<string>(),
                    };
                    groups.set(key, group);
                }
                if (group.seen.has(container.id)) {
                    continue;
                }
                group.seen.add(container.id);
                group.containers.push(container);
            }
        }

        return Array.from(groups.values())
            .filter((group) => group.containers.length > 1)
            .map((group) => ({
                lanIpPort: group.lanIpPort,
                publicPort: group.publicPort,
                type: group.type,
                containers: group.containers.map((container) =>
                    this.buildPortConflictContainerRef(container)
                ),
            }))
            .sort((a, b) => {
                if ((a.publicPort ?? 0) !== (b.publicPort ?? 0)) {
                    return (a.publicPort ?? 0) - (b.publicPort ?? 0);
                }
                return a.type.localeCompare(b.type);
            });
    }
}
@@ -0,0 +1,117 @@
import { Injectable, Logger, OnModuleDestroy } from '@nestjs/common';
import { createInterface } from 'readline';

import { execa } from 'execa';

import { pubsub, PUBSUB_CHANNEL } from '@app/core/pubsub.js';
import { catchHandlers } from '@app/core/utils/misc/catch-handlers.js';
import { DockerContainerStats } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';

@Injectable()
export class DockerStatsService implements OnModuleDestroy {
    private readonly logger = new Logger(DockerStatsService.name);
    private statsProcess: ReturnType<typeof execa> | null = null;
    private readonly STATS_FORMAT =
        '{{.ID}};{{.CPUPerc}};{{.MemUsage}};{{.MemPerc}};{{.NetIO}};{{.BlockIO}}';

    onModuleDestroy() {
        this.stopStatsStream();
    }

    public startStatsStream() {
        if (this.statsProcess) {
            return;
        }

        this.logger.log('Starting docker stats stream');

        try {
            this.statsProcess = execa('docker', ['stats', '--format', this.STATS_FORMAT, '--no-trunc'], {
                all: true,
                reject: false, // Don't throw on exit code != 0, handle via parsing/events
            });

            if (this.statsProcess.stdout) {
                const rl = createInterface({
                    input: this.statsProcess.stdout,
                    crlfDelay: Infinity,
                });

                rl.on('line', (line) => {
                    if (!line.trim()) return;
                    this.processStatsLine(line);
                });

                rl.on('error', (err) => {
                    this.logger.error('Error reading docker stats stream', err);
                });
            }

            if (this.statsProcess.stderr) {
                this.statsProcess.stderr.on('data', (data: Buffer) => {
                    // Log docker stats errors but don't crash
                    this.logger.debug(`Docker stats stderr: ${data.toString()}`);
                });
            }

            // Handle process exit
            this.statsProcess
                .then((result) => {
                    if (result.failed && !result.signal) {
                        this.logger.error('Docker stats process exited with error', result.shortMessage);
                        this.stopStatsStream();
                    }
                })
                .catch((err) => {
                    if (!err.killed) {
                        this.logger.error('Docker stats process ended unexpectedly', err);
                        this.stopStatsStream();
                    }
                });
        } catch (error) {
            this.logger.error('Failed to start docker stats', error);
            catchHandlers.docker(error as Error);
        }
    }

    public stopStatsStream() {
        if (this.statsProcess) {
            this.logger.log('Stopping docker stats stream');
            this.statsProcess.kill();
            this.statsProcess = null;
        }
    }

    private processStatsLine(line: string) {
        try {
            // format: ID;CPUPerc;MemUsage;MemPerc;NetIO;BlockIO
            // Example: 123abcde;0.00%;10MiB / 100MiB;10.00%;1kB / 2kB;0B / 0B

            // Remove ANSI escape codes if any (docker stats sometimes includes them)
            // eslint-disable-next-line no-control-regex
            const cleanLine = line.replace(/\x1B\[[0-9;]*[mK]/g, '');

            const parts = cleanLine.split(';');
            if (parts.length < 6) return;

            const [id, cpuPercStr, memUsage, memPercStr, netIO, blockIO] = parts;

            const stats: DockerContainerStats = {
                id,
                cpuPercent: this.parsePercentage(cpuPercStr),
                memUsage,
                memPercent: this.parsePercentage(memPercStr),
                netIO,
                blockIO,
            };

            pubsub.publish(PUBSUB_CHANNEL.DOCKER_STATS, { dockerContainerStats: stats });
        } catch (error) {
            this.logger.debug(`Failed to process stats line: ${line}`, error);
        }
    }

    private parsePercentage(value: string): number {
        return parseFloat(value.replace('%', '')) || 0;
    }
}
@@ -0,0 +1,357 @@
import { CACHE_MANAGER } from '@nestjs/cache-manager';
import { Inject, Injectable, Logger } from '@nestjs/common';

import { type Cache } from 'cache-manager';

import { TailscaleStatus } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { getDockerClient } from '@app/unraid-api/graph/resolvers/docker/utils/docker-client.js';

interface RawTailscaleStatus {
    Self: {
        Online: boolean;
        DNSName: string;
        TailscaleIPs?: string[];
        Relay?: string;
        PrimaryRoutes?: string[];
        ExitNodeOption?: boolean;
        KeyExpiry?: string;
    };
    ExitNodeStatus?: {
        Online: boolean;
        TailscaleIPs?: string[];
    };
    Version: string;
    BackendState?: string;
    AuthURL?: string;
}

interface DerpRegion {
    RegionCode: string;
    RegionName: string;
}

interface DerpMap {
    Regions: Record<string, DerpRegion>;
}

interface TailscaleVersionResponse {
    TarballsVersion: string;
}

@Injectable()
export class DockerTailscaleService {
    private readonly logger = new Logger(DockerTailscaleService.name);
    private readonly docker = getDockerClient();

    private static readonly DERP_MAP_CACHE_KEY = 'tailscale_derp_map';
    private static readonly VERSION_CACHE_KEY = 'tailscale_latest_version';
    private static readonly STATUS_CACHE_PREFIX = 'tailscale_status_';
    private static readonly DERP_MAP_TTL = 86400000; // 24 hours in ms
    private static readonly VERSION_TTL = 86400000; // 24 hours in ms
    private static readonly STATUS_TTL = 30000; // 30 seconds in ms

    constructor(@Inject(CACHE_MANAGER) private cacheManager: Cache) {}

    async getTailscaleStatus(
        containerName: string,
        labels: Record<string, string>,
        forceRefresh = false
    ): Promise<TailscaleStatus | null> {
        const hostname = labels['net.unraid.docker.tailscale.hostname'];
        const webUiTemplate = labels['net.unraid.docker.tailscale.webui'];

        const cacheKey = `${DockerTailscaleService.STATUS_CACHE_PREFIX}${containerName}`;

        if (forceRefresh) {
            await this.cacheManager.del(cacheKey);
        } else {
            const cached = await this.cacheManager.get<TailscaleStatus>(cacheKey);
            if (cached) {
                return cached;
            }
        }

        const rawStatus = await this.execTailscaleStatus(containerName);
        if (!rawStatus) {
            // Don't cache failures - return without caching so next request retries
            return {
                online: false,
                hostname: hostname || undefined,
                isExitNode: false,
                updateAvailable: false,
                keyExpired: false,
            };
        }

        const [derpMap, latestVersion] = await Promise.all([this.getDerpMap(), this.getLatestVersion()]);

        const version = rawStatus.Version?.split('-')[0];
        const updateAvailable = Boolean(
            version && latestVersion && this.isVersionLessThan(version, latestVersion)
        );

        const dnsName = rawStatus.Self.DNSName;

        let relayName: string | undefined;
        if (rawStatus.Self.Relay && derpMap) {
            relayName = this.mapRelayToRegion(rawStatus.Self.Relay, derpMap);
        }

        let keyExpiry: Date | undefined;
        let keyExpiryDays: number | undefined;
        let keyExpired = false;

        if (rawStatus.Self.KeyExpiry) {
            keyExpiry = new Date(rawStatus.Self.KeyExpiry);
            const now = new Date();
            const diffMs = keyExpiry.getTime() - now.getTime();
            keyExpiryDays = Math.floor(diffMs / (1000 * 60 * 60 * 24));
            keyExpired = diffMs < 0;
        }

        const webUiUrl = webUiTemplate ? this.resolveWebUiUrl(webUiTemplate, rawStatus) : undefined;

        const status: TailscaleStatus = {
            online: rawStatus.Self.Online,
            version,
            latestVersion: latestVersion ?? undefined,
            updateAvailable,
            hostname,
            dnsName: dnsName || undefined,
            relay: rawStatus.Self.Relay,
            relayName,
            tailscaleIps: rawStatus.Self.TailscaleIPs,
            primaryRoutes: rawStatus.Self.PrimaryRoutes,
            isExitNode: Boolean(rawStatus.Self.ExitNodeOption),
            exitNodeStatus: rawStatus.ExitNodeStatus
                ? {
                      online: rawStatus.ExitNodeStatus.Online,
                      tailscaleIps: rawStatus.ExitNodeStatus.TailscaleIPs,
                  }
                : undefined,
            webUiUrl,
            keyExpiry,
            keyExpiryDays,
            keyExpired,
            backendState: rawStatus.BackendState,
            authUrl: rawStatus.AuthURL,
        };

        await this.cacheManager.set(cacheKey, status, DockerTailscaleService.STATUS_TTL);

        return status;
    }

    async getDerpMap(): Promise<DerpMap | null> {
        const cached = await this.cacheManager.get<DerpMap>(DockerTailscaleService.DERP_MAP_CACHE_KEY);
        if (cached) {
            return cached;
        }

        try {
            const response = await fetch('https://login.tailscale.com/derpmap/default', {
                signal: AbortSignal.timeout(3000),
            });

            if (!response.ok) {
                this.logger.warn(`Failed to fetch DERP map: ${response.status}`);
                return null;
            }

            const data = (await response.json()) as DerpMap;
            await this.cacheManager.set(
                DockerTailscaleService.DERP_MAP_CACHE_KEY,
                data,
                DockerTailscaleService.DERP_MAP_TTL
            );
            return data;
        } catch (error) {
            this.logger.warn('Failed to fetch DERP map', error);
            return null;
        }
    }

    async getLatestVersion(): Promise<string | null> {
        const cached = await this.cacheManager.get<string>(DockerTailscaleService.VERSION_CACHE_KEY);
        if (cached) {
            return cached;
        }

        try {
            const response = await fetch('https://pkgs.tailscale.com/stable/?mode=json', {
                signal: AbortSignal.timeout(3000),
            });

            if (!response.ok) {
                this.logger.warn(`Failed to fetch Tailscale version: ${response.status}`);
                return null;
            }

            const data = (await response.json()) as TailscaleVersionResponse;
            const version = data.TarballsVersion;
            await this.cacheManager.set(
                DockerTailscaleService.VERSION_CACHE_KEY,
                version,
                DockerTailscaleService.VERSION_TTL
            );
            return version;
        } catch (error) {
            this.logger.warn('Failed to fetch Tailscale version', error);
            return null;
        }
    }

    private async execTailscaleStatus(containerName: string): Promise<RawTailscaleStatus | null> {
        try {
            const cleanName = containerName.replace(/^\//, '');
            const container = this.docker.getContainer(cleanName);

            const exec = await container.exec({
                Cmd: ['/bin/sh', '-c', 'tailscale status --json'],
                AttachStdout: true,
                AttachStderr: true,
            });

            const stream = await exec.start({ hijack: true, stdin: false });
            const output = await this.collectStreamOutput(stream);

            this.logger.debug(`Raw tailscale output for ${cleanName}: ${output.substring(0, 500)}...`);

            if (!output.trim()) {
                this.logger.warn(`Empty tailscale output for ${cleanName}`);
                return null;
            }

            const parsed = JSON.parse(output) as RawTailscaleStatus;
            this.logger.debug(
                `Parsed tailscale status for ${cleanName}: DNSName=${parsed.Self?.DNSName}, Online=${parsed.Self?.Online}`
            );
            return parsed;
        } catch (error) {
            this.logger.debug(`Failed to get Tailscale status for ${containerName}: ${error}`);
            return null;
        }
    }

    private async collectStreamOutput(stream: NodeJS.ReadableStream): Promise<string> {
        return new Promise((resolve, reject) => {
            const chunks: Buffer[] = [];
            stream.on('data', (chunk: Buffer) => {
                chunks.push(chunk);
            });
            stream.on('end', () => {
                const buffer = Buffer.concat(chunks);
                const output = this.demuxDockerStream(buffer);
                resolve(output);
            });
            stream.on('error', reject);
        });
    }

    private demuxDockerStream(buffer: Buffer): string {
        // Check if the buffer looks like it starts with JSON (not multiplexed)
        // Docker multiplexed streams start with stream type byte (0, 1, or 2)
        // followed by 3 zero bytes, then 4-byte size
        if (buffer.length > 0) {
            const firstChar = buffer.toString('utf8', 0, 1);
            if (firstChar === '{' || firstChar === '[') {
                // Already plain text/JSON, not multiplexed
                return buffer.toString('utf8');
            }
        }

        let offset = 0;
        const output: string[] = [];

        while (offset < buffer.length) {
            if (offset + 8 > buffer.length) break;

            const streamType = buffer.readUInt8(offset);
            // Valid stream types are 0 (stdin), 1 (stdout), 2 (stderr)
            if (streamType > 2) {
                // Doesn't look like multiplexed stream, treat as raw
                return buffer.toString('utf8');
            }

            const size = buffer.readUInt32BE(offset + 4);
            offset += 8;

            if (offset + size > buffer.length) break;

            const chunk = buffer.slice(offset, offset + size).toString('utf8');
            output.push(chunk);
            offset += size;
        }

        return output.join('');
    }

    private mapRelayToRegion(relayCode: string, derpMap: DerpMap): string | undefined {
        for (const region of Object.values(derpMap.Regions)) {
            if (region.RegionCode === relayCode) {
                return region.RegionName;
            }
        }
        return undefined;
    }

    private isVersionLessThan(current: string, latest: string): boolean {
        const currentParts = current.split('.').map(Number);
        const latestParts = latest.split('.').map(Number);

        for (let i = 0; i < Math.max(currentParts.length, latestParts.length); i++) {
            const curr = currentParts[i] || 0;
            const lat = latestParts[i] || 0;
            if (curr < lat) return true;
            if (curr > lat) return false;
        }
        return false;
    }

    private resolveWebUiUrl(template: string, status: RawTailscaleStatus): string | undefined {
        if (!template) return undefined;

        let url = template;
        const dnsName = status.Self.DNSName?.replace(/\.$/, '');

        // Handle [hostname][magicdns] or [hostname] - use MagicDNS name and port 443
        if (url.includes('[hostname]')) {
            if (dnsName) {
                // Replace [hostname][magicdns] with the full DNS name
                url = url.replace('[hostname][magicdns]', dnsName);
                // Replace standalone [hostname] with the DNS name
                url = url.replace('[hostname]', dnsName);
                // When using MagicDNS, also replace [IP] with DNS name
                url = url.replace(/\[IP\]/g, dnsName);
                // When using MagicDNS with Serve/Funnel, port is always 443
                url = url.replace(/\[PORT:\d+\]/g, '443');
            } else {
                // DNS name not available, can't resolve
                return undefined;
            }
        } else if (url.includes('[noserve]')) {
            // Handle [noserve] - use direct Tailscale IP
            const ipv4 = status.Self.TailscaleIPs?.find((ip) => !ip.includes(':'));
            if (ipv4) {
                const portMatch = template.match(/\[PORT:(\d+)\]/);
                const port = portMatch ? `:${portMatch[1]}` : '';
                url = `http://${ipv4}${port}`;
            } else {
                return undefined;
            }
        } else {
            // Custom URL - just do basic replacements
            if (url.includes('[IP]') && status.Self.TailscaleIPs?.[0]) {
                const ipv4 = status.Self.TailscaleIPs.find((ip) => !ip.includes(':'));
                url = url.replace(/\[IP\]/g, ipv4 || status.Self.TailscaleIPs[0]);
            }

            const portMatch = url.match(/\[PORT:(\d+)\]/);
            if (portMatch) {
                url = url.replace(portMatch[0], portMatch[1]);
            }
        }

        return url;
    }
}
@@ -0,0 +1,61 @@
import { Injectable, Logger } from '@nestjs/common';
import { readFile } from 'fs/promises';

import { XMLParser } from 'fast-xml-parser';

@Injectable()
export class DockerTemplateIconService {
    private readonly logger = new Logger(DockerTemplateIconService.name);
    private readonly xmlParser = new XMLParser({
        ignoreAttributes: false,
        parseAttributeValue: true,
        trimValues: true,
    });

    async getIconFromTemplate(templatePath: string): Promise<string | null> {
        try {
            const content = await readFile(templatePath, 'utf-8');
            const parsed = this.xmlParser.parse(content);

            if (!parsed.Container) {
                return null;
            }

            return parsed.Container.Icon || null;
        } catch (error) {
            this.logger.debug(
                `Failed to read icon from template ${templatePath}: ${error instanceof Error ? error.message : 'Unknown error'}`
            );
            return null;
        }
    }

    async getIconsForContainers(
        containers: Array<{ id: string; templatePath?: string }>
    ): Promise<Map<string, string>> {
        const iconMap = new Map<string, string>();

        const iconPromises = containers.map(async (container) => {
            if (!container.templatePath) {
                return null;
            }

            const icon = await this.getIconFromTemplate(container.templatePath);
            if (icon) {
                return { id: container.id, icon };
            }
            return null;
        });

        const results = await Promise.all(iconPromises);

        for (const result of results) {
            if (result) {
                iconMap.set(result.id, result.icon);
            }
        }

        this.logger.debug(`Loaded ${iconMap.size} icons from ${containers.length} containers`);
        return iconMap;
    }
}
@@ -0,0 +1,16 @@
import { Field, Int, ObjectType } from '@nestjs/graphql';

@ObjectType()
export class DockerTemplateSyncResult {
    @Field(() => Int)
    scanned!: number;

    @Field(() => Int)
    matched!: number;

    @Field(() => Int)
    skipped!: number;

    @Field(() => [String])
    errors!: string[];
}
@@ -0,0 +1,425 @@
import { Test, TestingModule } from '@nestjs/testing';
import { mkdir, rm, writeFile } from 'fs/promises';
import { join } from 'path';

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';

vi.mock('@app/environment.js', () => ({
    PATHS_DOCKER_TEMPLATES: ['/tmp/test-templates'],
    ENABLE_NEXT_DOCKER_RELEASE: true,
}));

describe('DockerTemplateScannerService', () => {
    let service: DockerTemplateScannerService;
    let dockerConfigService: DockerConfigService;
    let dockerService: DockerService;
    const testTemplateDir = '/tmp/test-templates';

    beforeEach(async () => {
        await mkdir(testTemplateDir, { recursive: true });

        const mockDockerService = {
            getContainers: vi.fn(),
        };

        const mockDockerConfigService = {
            getConfig: vi.fn(),
            replaceConfig: vi.fn(),
            validate: vi.fn((config) => Promise.resolve(config)),
        };

        const module: TestingModule = await Test.createTestingModule({
            providers: [
                DockerTemplateScannerService,
                {
                    provide: DockerConfigService,
                    useValue: mockDockerConfigService,
                },
                {
                    provide: DockerService,
                    useValue: mockDockerService,
                },
            ],
        }).compile();

        service = module.get<DockerTemplateScannerService>(DockerTemplateScannerService);
        dockerConfigService = module.get<DockerConfigService>(DockerConfigService);
        dockerService = module.get<DockerService>(DockerService);
    });

    afterEach(async () => {
        await rm(testTemplateDir, { recursive: true, force: true });
    });

    describe('parseTemplate', () => {
        it('should parse valid XML template', async () => {
            const templatePath = join(testTemplateDir, 'test.xml');
            const templateContent = `<?xml version="1.0"?>
<Container version="2">
    <Name>test-container</Name>
    <Repository>test/image</Repository>
</Container>`;
            await writeFile(templatePath, templateContent);

            const result = await (service as any).parseTemplate(templatePath);

            expect(result).toEqual({
                filePath: templatePath,
                name: 'test-container',
                repository: 'test/image',
            });
        });

        it('should handle invalid XML gracefully by returning null', async () => {
            const templatePath = join(testTemplateDir, 'invalid.xml');
            await writeFile(templatePath, 'not xml');

            const result = await (service as any).parseTemplate(templatePath);
            expect(result).toBeNull();
        });

        it('should return null for XML without Container element', async () => {
            const templatePath = join(testTemplateDir, 'no-container.xml');
            const templateContent = `<?xml version="1.0"?><Root></Root>`;
            await writeFile(templatePath, templateContent);

            const result = await (service as any).parseTemplate(templatePath);

            expect(result).toBeNull();
        });
    });

    describe('matchContainerToTemplate', () => {
        it('should match by container name (exact match)', () => {
            const container: DockerContainer = {
                id: 'abc123',
                names: ['/test-container'],
                image: 'different/image:latest',
            } as DockerContainer;

            const templates = [
                { filePath: '/path/1', name: 'test-container', repository: 'some/repo' },
                { filePath: '/path/2', name: 'other', repository: 'other/repo' },
            ];

            const result = (service as any).matchContainerToTemplate(container, templates);

            expect(result).toEqual(templates[0]);
        });

        it('should match by repository when name does not match', () => {
            const container: DockerContainer = {
                id: 'abc123',
                names: ['/my-container'],
                image: 'test/image:v1.0',
            } as DockerContainer;

            const templates = [
                { filePath: '/path/1', name: 'different', repository: 'other/repo' },
                { filePath: '/path/2', name: 'also-different', repository: 'test/image' },
            ];

            const result = (service as any).matchContainerToTemplate(container, templates);

            expect(result).toEqual(templates[1]);
        });

        it('should strip tags when matching repository', () => {
            const container: DockerContainer = {
                id: 'abc123',
                names: ['/my-container'],
                image: 'test/image:latest',
            } as DockerContainer;

            const templates = [
                { filePath: '/path/1', name: 'different', repository: 'test/image:v1.0' },
            ];

            const result = (service as any).matchContainerToTemplate(container, templates);

            expect(result).toEqual(templates[0]);
        });

        it('should return null when no match found', () => {
            const container: DockerContainer = {
                id: 'abc123',
                names: ['/my-container'],
                image: 'test/image:latest',
            } as DockerContainer;

            const templates = [{ filePath: '/path/1', name: 'different', repository: 'other/image' }];

            const result = (service as any).matchContainerToTemplate(container, templates);

            expect(result).toBeNull();
        });

        it('should be case-insensitive', () => {
            const container: DockerContainer = {
                id: 'abc123',
                names: ['/Test-Container'],
                image: 'Test/Image:latest',
            } as DockerContainer;

            const templates = [
                { filePath: '/path/1', name: 'test-container', repository: 'test/image' },
            ];

            const result = (service as any).matchContainerToTemplate(container, templates);

            expect(result).toEqual(templates[0]);
        });
    });

    describe('scanTemplates', () => {
        it('should scan templates and create mappings', async () => {
            const template1 = join(testTemplateDir, 'redis.xml');
            await writeFile(
                template1,
                `<?xml version="1.0"?>
<Container version="2">
    <Name>redis</Name>
    <Repository>redis</Repository>
</Container>`
            );

            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/redis'],
                    image: 'redis:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerService.getContainers).mockResolvedValue(containers);
            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: [],
            });

            const result = await service.scanTemplates();

            expect(result.scanned).toBe(1);
            expect(result.matched).toBe(1);
            expect(result.errors).toHaveLength(0);
            expect(dockerConfigService.replaceConfig).toHaveBeenCalledWith(
                expect.objectContaining({
                    templateMappings: {
                        redis: template1,
                    },
                })
            );
        });

        it('should skip containers in skipTemplatePaths', async () => {
            const template1 = join(testTemplateDir, 'redis.xml');
            await writeFile(
                template1,
                `<?xml version="1.0"?>
<Container version="2">
    <Name>redis</Name>
    <Repository>redis</Repository>
</Container>`
            );

            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/redis'],
                    image: 'redis:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerService.getContainers).mockResolvedValue(containers);
            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: ['redis'],
            });

            const result = await service.scanTemplates();

            expect(result.skipped).toBe(1);
            expect(result.matched).toBe(0);
        });

        it('should handle missing template directory gracefully', async () => {
            await rm(testTemplateDir, { recursive: true, force: true });

            const containers: DockerContainer[] = [];

            vi.mocked(dockerService.getContainers).mockResolvedValue(containers);
            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: [],
            });

            const result = await service.scanTemplates();

            expect(result.scanned).toBe(0);
            expect(result.errors.length).toBeGreaterThan(0);
        });

        it('should handle docker service errors gracefully', async () => {
            vi.mocked(dockerService.getContainers).mockRejectedValue(new Error('Docker error'));
            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: [],
            });

            const result = await service.scanTemplates();

            expect(result.errors.length).toBeGreaterThan(0);
            expect(result.errors[0]).toContain('Failed to get containers');
        });

        it('should set null mapping for unmatched containers', async () => {
            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/unknown'],
                    image: 'unknown:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerService.getContainers).mockResolvedValue(containers);
            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: [],
            });

            await service.scanTemplates();

            expect(dockerConfigService.replaceConfig).toHaveBeenCalledWith(
                expect.objectContaining({
                    templateMappings: {
                        unknown: null,
                    },
                })
            );
        });
    });

    describe('syncMissingContainers', () => {
        it('should return true and trigger scan when containers are missing mappings', async () => {
            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/redis'],
                    image: 'redis:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: [],
            });

            vi.mocked(dockerService.getContainers).mockResolvedValue(containers);

            const scanSpy = vi.spyOn(service, 'scanTemplates').mockResolvedValue({
                scanned: 0,
                matched: 0,
                skipped: 0,
                errors: [],
            });

            const result = await service.syncMissingContainers(containers);

            expect(result).toBe(true);
            expect(scanSpy).toHaveBeenCalled();
        });

        it('should return false when all containers have mappings', async () => {
            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/redis'],
                    image: 'redis:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {
                    redis: '/path/to/template.xml',
                },
                skipTemplatePaths: [],
            });

            const scanSpy = vi.spyOn(service, 'scanTemplates');

            const result = await service.syncMissingContainers(containers);

            expect(result).toBe(false);
            expect(scanSpy).not.toHaveBeenCalled();
        });

        it('should not trigger scan for containers in skip list', async () => {
            const containers: DockerContainer[] = [
                {
                    id: 'container1',
                    names: ['/redis'],
                    image: 'redis:latest',
                } as DockerContainer,
            ];

            vi.mocked(dockerConfigService.getConfig).mockReturnValue({
                updateCheckCronSchedule: '0 6 * * *',
                templateMappings: {},
                skipTemplatePaths: ['redis'],
            });

            const scanSpy = vi.spyOn(service, 'scanTemplates');

            const result = await service.syncMissingContainers(containers);

            expect(result).toBe(false);
            expect(scanSpy).not.toHaveBeenCalled();
        });
    });

    describe('normalizeContainerName', () => {
        it('should remove leading slash', () => {
            const result = (service as any).normalizeContainerName('/container-name');
            expect(result).toBe('container-name');
        });

        it('should convert to lowercase', () => {
            const result = (service as any).normalizeContainerName('/Container-Name');
            expect(result).toBe('container-name');
        });
    });

    describe('normalizeRepository', () => {
        it('should strip tag', () => {
            const result = (service as any).normalizeRepository('redis:latest');
            expect(result).toBe('redis');
        });

        it('should strip version tag', () => {
            const result = (service as any).normalizeRepository('postgres:14.5');
            expect(result).toBe('postgres');
        });

        it('should convert to lowercase', () => {
            const result = (service as any).normalizeRepository('Redis:Latest');
            expect(result).toBe('redis');
        });

        it('should handle repository without tag', () => {
            const result = (service as any).normalizeRepository('nginx');
            expect(result).toBe('nginx');
        });
    });
});
@@ -0,0 +1,293 @@
import { Injectable, Logger } from '@nestjs/common';
import { Timeout } from '@nestjs/schedule';
import { readdir, readFile } from 'fs/promises';
import { join } from 'path';

import { XMLParser } from 'fast-xml-parser';

import { ENABLE_NEXT_DOCKER_RELEASE, PATHS_DOCKER_TEMPLATES } from '@app/environment.js';
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerTemplateSyncResult } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.model.js';
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';

interface ParsedTemplate {
    filePath: string;
    name?: string;
    repository?: string;
}

@Injectable()
export class DockerTemplateScannerService {
    private readonly logger = new Logger(DockerTemplateScannerService.name);
    private readonly xmlParser = new XMLParser({
        ignoreAttributes: false,
        parseAttributeValue: true,
        trimValues: true,
    });

    constructor(
        private readonly dockerConfigService: DockerConfigService,
        private readonly dockerService: DockerService
    ) {}

    @Timeout(5_000)
    async bootstrapScan(attempt = 1, maxAttempts = 5): Promise<void> {
        if (!ENABLE_NEXT_DOCKER_RELEASE) {
            return;
        }
        try {
            this.logger.log(`Starting template scan (attempt ${attempt}/${maxAttempts})`);
            const result = await this.scanTemplates();
            this.logger.log(
                `Template scan complete: ${result.matched} matched, ${result.scanned} scanned, ${result.skipped} skipped`
            );
        } catch (error) {
            if (attempt < maxAttempts) {
                this.logger.warn(
                    `Template scan failed (attempt ${attempt}/${maxAttempts}), retrying in 60s: ${error instanceof Error ? error.message : 'Unknown error'}`
                );
                setTimeout(() => this.bootstrapScan(attempt + 1, maxAttempts), 60_000);
            } else {
                this.logger.error(
                    `Template scan failed after ${maxAttempts} attempts: ${error instanceof Error ? error.message : 'Unknown error'}`
                );
            }
        }
    }

    async syncMissingContainers(containers: DockerContainer[]): Promise<boolean> {
        const config = this.dockerConfigService.getConfig();
        const mappings = config.templateMappings || {};
        const skipSet = new Set(config.skipTemplatePaths || []);

        const needsSync = containers.filter((c) => {
            const containerName = this.normalizeContainerName(c.names[0]);
            return !mappings[containerName] && !skipSet.has(containerName);
        });

        if (needsSync.length > 0) {
            this.logger.log(
                `Found ${needsSync.length} containers without template mappings, triggering sync`
            );
            await this.scanTemplates();
            return true;
        }
        return false;
    }

    async scanTemplates(): Promise<DockerTemplateSyncResult> {
        const result: DockerTemplateSyncResult = {
            scanned: 0,
            matched: 0,
            skipped: 0,
            errors: [],
        };

        const templates = await this.loadAllTemplates(result);

        try {
            const containers = await this.dockerService.getContainers({ skipCache: true });
            const config = this.dockerConfigService.getConfig();
            const currentMappings = config.templateMappings || {};
            const skipSet = new Set(config.skipTemplatePaths || []);

            const newMappings: Record<string, string | null> = { ...currentMappings };

            for (const container of containers) {
                const containerName = this.normalizeContainerName(container.names[0]);
                if (skipSet.has(containerName)) {
                    result.skipped++;
                    continue;
                }

                const match = this.matchContainerToTemplate(container, templates);
                if (match) {
                    newMappings[containerName] = match.filePath;
                    result.matched++;
                } else {
                    newMappings[containerName] = null;
                }
            }

            await this.updateMappings(newMappings);
        } catch (error) {
            const errorMsg = `Failed to get containers: ${error instanceof Error ? error.message : 'Unknown error'}`;
            this.logger.error(error, 'Failed to get containers');
            result.errors.push(errorMsg);
        }

        return result;
    }

    async getTemplateDetails(filePath: string): Promise<{
        project?: string;
        registry?: string;
        support?: string;
        overview?: string;
        icon?: string;
        webUi?: string;
        shell?: string;
        ports?: Array<{ privatePort: number; publicPort: number; type: 'tcp' | 'udp' }>;
    } | null> {
        try {
            const content = await readFile(filePath, 'utf-8');
            const parsed = this.xmlParser.parse(content);

            if (!parsed.Container) {
                return null;
            }

            const container = parsed.Container;
            const ports = this.extractTemplatePorts(container);

            return {
                project: container.Project,
                registry: container.Registry,
                support: container.Support,
                overview: container.ReadMe || container.Overview,
                icon: container.Icon,
                webUi: container.WebUI,
                shell: container.Shell,
                ports,
            };
        } catch (error) {
            this.logger.warn(
                `Failed to parse template ${filePath}: ${error instanceof Error ? error.message : 'Unknown error'}`
            );
            return null;
        }
    }

    private extractTemplatePorts(
        container: Record<string, unknown>
    ): Array<{ privatePort: number; publicPort: number; type: 'tcp' | 'udp' }> {
        const ports: Array<{ privatePort: number; publicPort: number; type: 'tcp' | 'udp' }> = [];

        const configs = container.Config;
        if (!configs) {
            return ports;
        }

        const configArray = Array.isArray(configs) ? configs : [configs];

        for (const config of configArray) {
            if (!config || typeof config !== 'object') continue;

            const attrs = config['@_Type'];
            if (attrs !== 'Port') continue;

            const target = config['@_Target'];
            const mode = config['@_Mode'];
            const value = config['#text'];

            if (target === undefined || value === undefined) continue;

            const privatePort = parseInt(String(target), 10);
            const publicPort = parseInt(String(value), 10);

            if (isNaN(privatePort) || isNaN(publicPort)) continue;

            const type = String(mode).toLowerCase() === 'udp' ? 'udp' : 'tcp';
            ports.push({ privatePort, publicPort, type });
        }

        return ports;
    }

    private async loadAllTemplates(result: DockerTemplateSyncResult): Promise<ParsedTemplate[]> {
        const allTemplates: ParsedTemplate[] = [];

        for (const directory of PATHS_DOCKER_TEMPLATES) {
            try {
                const files = await readdir(directory);
                const xmlFiles = files.filter((f) => f.endsWith('.xml'));
                result.scanned += xmlFiles.length;

                for (const file of xmlFiles) {
                    const filePath = join(directory, file);
                    try {
                        const template = await this.parseTemplate(filePath);
                        if (template) {
                            allTemplates.push(template);
                        }
                    } catch (error) {
                        const errorMsg = `Failed to parse template ${filePath}: ${error instanceof Error ? error.message : 'Unknown error'}`;
                        this.logger.warn(errorMsg);
                        result.errors.push(errorMsg);
                    }
                }
            } catch (error) {
                const errorMsg = `Failed to read template directory ${directory}: ${error instanceof Error ? error.message : 'Unknown error'}`;
                this.logger.warn(errorMsg);
                result.errors.push(errorMsg);
            }
        }

        return allTemplates;
    }

    private async parseTemplate(filePath: string): Promise<ParsedTemplate | null> {
        const content = await readFile(filePath, 'utf-8');
        const parsed = this.xmlParser.parse(content);

        if (!parsed.Container) {
            return null;
        }

        const container = parsed.Container;
        return {
            filePath,
            name: container.Name,
            repository: container.Repository,
        };
    }

    private matchContainerToTemplate(
        container: DockerContainer,
        templates: ParsedTemplate[]
    ): ParsedTemplate | null {
        const containerName = this.normalizeContainerName(container.names[0]);
        const containerImage = this.normalizeRepository(container.image);

        for (const template of templates) {
            if (template.name && this.normalizeContainerName(template.name) === containerName) {
                return template;
            }
        }

        for (const template of templates) {
            if (
                template.repository &&
                this.normalizeRepository(template.repository) === containerImage
            ) {
                return template;
            }
        }

        return null;
    }

    private normalizeContainerName(name: string): string {
        return name.replace(/^\//, '').toLowerCase();
    }

    private normalizeRepository(repository: string): string {
        // Strip digest if present (e.g., image@sha256:abc123)
        const [withoutDigest] = repository.split('@');
        // Only remove tag if colon appears after last slash (i.e., it's a tag, not a port)
        const lastColon = withoutDigest.lastIndexOf(':');
        const lastSlash = withoutDigest.lastIndexOf('/');
        const withoutTag = lastColon > lastSlash ? withoutDigest.slice(0, lastColon) : withoutDigest;
        return withoutTag.toLowerCase();
    }

    private async updateMappings(mappings: Record<string, string | null>): Promise<void> {
        const config = this.dockerConfigService.getConfig();
        const updated = await this.dockerConfigService.validate({
            ...config,
            templateMappings: mappings,
        });
        this.dockerConfigService.replaceConfig(updated);
    }
}
@@ -1,6 +1,15 @@
|
||||
import { Field, ID, Int, ObjectType, registerEnumType } from '@nestjs/graphql';
|
||||
import {
|
||||
Field,
|
||||
Float,
|
||||
GraphQLISODateTime,
|
||||
InputType,
|
||||
Int,
|
||||
ObjectType,
|
||||
registerEnumType,
|
||||
} from '@nestjs/graphql';
|
||||
|
||||
import { Node } from '@unraid/shared/graphql.model.js';
|
||||
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
|
||||
import { GraphQLBigInt, GraphQLJSON, GraphQLPort } from 'graphql-scalars';
|
||||
|
||||
export enum ContainerPortType {
|
||||
@@ -27,8 +36,54 @@ export class ContainerPort {
|
||||
type!: ContainerPortType;
|
||||
}
|
||||
|
||||
@ObjectType()
|
||||
export class DockerPortConflictContainer {
|
||||
@Field(() => PrefixedID)
|
||||
id!: string;
|
||||
|
||||
@Field(() => String)
|
||||
name!: string;
|
||||
}
|
||||
|
||||
@ObjectType()
|
||||
export class DockerContainerPortConflict {
|
||||
@Field(() => GraphQLPort)
|
||||
privatePort!: number;
|
||||
|
||||
@Field(() => ContainerPortType)
|
||||
type!: ContainerPortType;
|
||||
|
||||
@Field(() => [DockerPortConflictContainer])
|
||||
containers!: DockerPortConflictContainer[];
|
||||
}
|
||||
|
||||
@ObjectType()
|
||||
export class DockerLanPortConflict {
|
||||
@Field(() => String)
|
||||
lanIpPort!: string;
|
||||
|
||||
@Field(() => GraphQLPort, { nullable: true })
|
||||
publicPort?: number;
|
||||
|
||||
@Field(() => ContainerPortType)
|
||||
type!: ContainerPortType;
|
||||
|
||||
@Field(() => [DockerPortConflictContainer])
|
||||
containers!: DockerPortConflictContainer[];
|
||||
}
|
||||
|
||||
@ObjectType()
|
||||
export class DockerPortConflicts {
|
||||
@Field(() => [DockerContainerPortConflict])
|
||||
containerPorts!: DockerContainerPortConflict[];
|
||||
|
||||
@Field(() => [DockerLanPortConflict])
|
||||
lanPorts!: DockerLanPortConflict[];
|
||||
}
|
||||
|
||||
export enum ContainerState {
|
||||
RUNNING = 'RUNNING',
|
||||
PAUSED = 'PAUSED',
|
||||
EXITED = 'EXITED',
|
||||
}
|
||||
|
||||
@@ -89,12 +144,30 @@ export class DockerContainer extends Node {
|
||||
@Field(() => [ContainerPort])
|
||||
ports!: ContainerPort[];
|
||||
|
||||
@Field(() => [String], {
|
||||
nullable: true,
|
||||
description: 'List of LAN-accessible host:port values',
|
||||
})
|
||||
lanIpPorts?: string[];
|
||||
|
||||
@Field(() => GraphQLBigInt, {
|
||||
nullable: true,
|
||||
description: 'Total size of all files in the container (in bytes)',
|
||||
})
|
||||
sizeRootFs?: number;
|
||||
|
||||
@Field(() => GraphQLBigInt, {
|
||||
nullable: true,
|
||||
description: 'Size of writable layer (in bytes)',
|
||||
})
|
||||
sizeRw?: number;
|
||||
|
||||
@Field(() => GraphQLBigInt, {
|
||||
nullable: true,
|
||||
description: 'Size of container logs (in bytes)',
|
||||
})
|
||||
sizeLog?: number;
|
||||
|
||||
@Field(() => GraphQLJSON, { nullable: true })
|
||||
labels?: Record<string, any>;
|
||||
|
||||
@@ -115,6 +188,45 @@ export class DockerContainer extends Node {
|
||||
|
||||
@Field(() => Boolean)
|
||||
autoStart!: boolean;
|
||||
|
||||
@Field(() => Int, { nullable: true, description: 'Zero-based order in the auto-start list' })
|
||||
autoStartOrder?: number;
|
||||
|
||||
@Field(() => Int, { nullable: true, description: 'Wait time in seconds applied after start' })
|
||||
autoStartWait?: number;
|
||||
|
||||
@Field(() => String, { nullable: true })
|
||||
templatePath?: string;
|
||||
|
||||
@Field(() => String, { nullable: true, description: 'Project/Product homepage URL' })
|
||||
projectUrl?: string;
|
||||
|
||||
@Field(() => String, { nullable: true, description: 'Registry/Docker Hub URL' })
|
||||
registryUrl?: string;
|
||||
|
||||
@Field(() => String, { nullable: true, description: 'Support page/thread URL' })
|
||||
supportUrl?: string;
|
||||
|
||||
@Field(() => String, { nullable: true, description: 'Icon URL' })
|
||||
iconUrl?: string;
|
||||
|
||||
@Field(() => String, { nullable: true, description: 'Resolved WebUI URL from template' })
|
||||
webUiUrl?: string;
|
||||
|
||||
@Field(() => String, {
|
||||
nullable: true,
|
||||
description: 'Shell to use for console access (from template)',
|
||||
})
|
||||
shell?: string;
|
||||
|
||||
@Field(() => [ContainerPort], {
|
||||
nullable: true,
|
||||
description: 'Port mappings from template (used when container is not running)',
|
||||
})
|
||||
templatePorts?: ContainerPort[];
|
||||
|
||||
@Field(() => Boolean, { description: 'Whether the container is orphaned (no template found)' })
|
||||
isOrphaned!: boolean;
|
||||
}
|
||||
|
||||
@ObjectType({ implements: () => Node })
|
||||
@@ -162,6 +274,127 @@ export class DockerNetwork extends Node {
|
||||
labels!: Record<string, any>;
|
||||
}
|
||||
|
||||
@ObjectType()
export class DockerContainerLogLine {
    @Field(() => GraphQLISODateTime)
    timestamp!: Date;

    @Field(() => String)
    message!: string;
}

@ObjectType()
export class DockerContainerLogs {
    @Field(() => PrefixedID)
    containerId!: string;

    @Field(() => [DockerContainerLogLine])
    lines!: DockerContainerLogLine[];

    @Field(() => GraphQLISODateTime, {
        nullable: true,
        description:
            'Cursor that can be passed back through the since argument to continue streaming logs.',
    })
    cursor?: Date | null;
}

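Note: a minimal sketch of the intended cursor flow, assuming the logs field is reached through the `docker` query field resolved later in this diff; the container id and the cursor literal are placeholders, not values taken from the schema.

query {
    docker {
        logs(id: "<container-id>", tail: 100) {
            containerId
            lines {
                timestamp
                message
            }
            cursor
        }
    }
}

# Continue from where the previous page ended by passing the
# returned cursor back as `since`:
query {
    docker {
        logs(id: "<container-id>", since: "<cursor-from-previous-response>") {
            lines {
                timestamp
                message
            }
            cursor
        }
    }
}
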
@ObjectType()
export class DockerContainerStats {
    @Field(() => PrefixedID)
    id!: string;

    @Field(() => Float, { description: 'CPU Usage Percentage' })
    cpuPercent!: number;

    @Field(() => String, { description: 'Memory Usage String (e.g. 100MB / 1GB)' })
    memUsage!: string;

    @Field(() => Float, { description: 'Memory Usage Percentage' })
    memPercent!: number;

    @Field(() => String, { description: 'Network I/O String (e.g. 100MB / 1GB)' })
    netIO!: string;

    @Field(() => String, { description: 'Block I/O String (e.g. 100MB / 1GB)' })
    blockIO!: string;
}

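Note: a hedged example of consuming this type through the `dockerContainerStats` subscription registered later in this diff; the selection simply mirrors the fields above.

subscription {
    dockerContainerStats {
        id
        cpuPercent
        memUsage
        memPercent
        netIO
        blockIO
    }
}
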
@ObjectType({ description: 'Tailscale exit node connection status' })
export class TailscaleExitNodeStatus {
    @Field(() => Boolean, { description: 'Whether the exit node is online' })
    online!: boolean;

    @Field(() => [String], { nullable: true, description: 'Tailscale IPs of the exit node' })
    tailscaleIps?: string[];
}

@ObjectType({ description: 'Tailscale status for a Docker container' })
export class TailscaleStatus {
    @Field(() => Boolean, { description: 'Whether Tailscale is online in the container' })
    online!: boolean;

    @Field(() => String, { nullable: true, description: 'Current Tailscale version' })
    version?: string;

    @Field(() => String, { nullable: true, description: 'Latest available Tailscale version' })
    latestVersion?: string;

    @Field(() => Boolean, { description: 'Whether a Tailscale update is available' })
    updateAvailable!: boolean;

    @Field(() => String, { nullable: true, description: 'Configured Tailscale hostname' })
    hostname?: string;

    @Field(() => String, { nullable: true, description: 'Actual Tailscale DNS name' })
    dnsName?: string;

    @Field(() => String, { nullable: true, description: 'DERP relay code' })
    relay?: string;

    @Field(() => String, { nullable: true, description: 'DERP relay region name' })
    relayName?: string;

    @Field(() => [String], { nullable: true, description: 'Tailscale IPv4 and IPv6 addresses' })
    tailscaleIps?: string[];

    @Field(() => [String], { nullable: true, description: 'Advertised subnet routes' })
    primaryRoutes?: string[];

    @Field(() => Boolean, { description: 'Whether this container is an exit node' })
    isExitNode!: boolean;

    @Field(() => TailscaleExitNodeStatus, {
        nullable: true,
        description: 'Status of the connected exit node (if using one)',
    })
    exitNodeStatus?: TailscaleExitNodeStatus;

    @Field(() => String, { nullable: true, description: 'Tailscale Serve/Funnel WebUI URL' })
    webUiUrl?: string;

    @Field(() => GraphQLISODateTime, { nullable: true, description: 'Tailscale key expiry date' })
    keyExpiry?: Date;

    @Field(() => Int, { nullable: true, description: 'Days until key expires' })
    keyExpiryDays?: number;

    @Field(() => Boolean, { description: 'Whether the Tailscale key has expired' })
    keyExpired!: boolean;

    @Field(() => String, {
        nullable: true,
        description: 'Tailscale backend state (Running, NeedsLogin, Stopped, etc.)',
    })
    backendState?: string;

    @Field(() => String, {
        nullable: true,
        description: 'Authentication URL if Tailscale needs login',
    })
    authUrl?: string;
}

@ObjectType({
    implements: () => Node,
})
@@ -171,4 +404,28 @@ export class Docker extends Node {

    @Field(() => [DockerNetwork])
    networks!: DockerNetwork[];

    @Field(() => DockerPortConflicts)
    portConflicts!: DockerPortConflicts;

    @Field(() => DockerContainerLogs, {
        description:
            'Access container logs. Requires specifying a target container id through resolver arguments.',
    })
    logs!: DockerContainerLogs;
}

@InputType()
export class DockerAutostartEntryInput {
    @Field(() => PrefixedID, { description: 'Docker container identifier' })
    id!: string;

    @Field(() => Boolean, { description: 'Whether the container should auto-start' })
    autoStart!: boolean;

    @Field(() => Int, {
        nullable: true,
        description: 'Number of seconds to wait after starting the container',
    })
    wait?: number | null;
}

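Note: a sketch of how this input type feeds the `updateAutostartConfiguration` mutation defined later in this diff, assuming the mutation is nested under the `docker` mutations field; the container id is a placeholder.

mutation {
    docker {
        updateAutostartConfiguration(
            entries: [{ id: "<container-id>", autoStart: true, wait: 10 }]
            persistUserPreferences: true
        )
    }
}
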
@@ -1,21 +1,28 @@
import { CacheModule } from '@nestjs/cache-manager';
import { Test, TestingModule } from '@nestjs/testing';

import { describe, expect, it, vi } from 'vitest';

import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerEventService } from '@app/unraid-api/graph/resolvers/docker/docker-event.service.js';
import { DockerLogService } from '@app/unraid-api/graph/resolvers/docker/docker-log.service.js';
import { DockerNetworkService } from '@app/unraid-api/graph/resolvers/docker/docker-network.service.js';
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
import { DockerPortService } from '@app/unraid-api/graph/resolvers/docker/docker-port.service.js';
import { DockerStatsService } from '@app/unraid-api/graph/resolvers/docker/docker-stats.service.js';
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
import { DockerModule } from '@app/unraid-api/graph/resolvers/docker/docker.module.js';
import { DockerMutationsResolver } from '@app/unraid-api/graph/resolvers/docker/docker.mutations.resolver.js';
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
import { SubscriptionHelperService } from '@app/unraid-api/graph/services/subscription-helper.service.js';
import { SubscriptionTrackerService } from '@app/unraid-api/graph/services/subscription-tracker.service.js';

describe('DockerModule', () => {
    it('should compile the module', async () => {
        const module = await Test.createTestingModule({
            imports: [DockerModule],
            imports: [CacheModule.register({ isGlobal: true }), DockerModule],
        })
            .overrideProvider(DockerService)
            .useValue({ getDockerClient: vi.fn() })
@@ -23,6 +30,22 @@ describe('DockerModule', () => {
            .useValue({ getConfig: vi.fn() })
            .overrideProvider(DockerConfigService)
            .useValue({ getConfig: vi.fn() })
            .overrideProvider(DockerLogService)
            .useValue({})
            .overrideProvider(DockerNetworkService)
            .useValue({})
            .overrideProvider(DockerPortService)
            .useValue({})
            .overrideProvider(SubscriptionTrackerService)
            .useValue({
                registerTopic: vi.fn(),
                subscribe: vi.fn(),
                unsubscribe: vi.fn(),
            })
            .overrideProvider(SubscriptionHelperService)
            .useValue({
                createTrackedSubscription: vi.fn(),
            })
            .compile();

        expect(module).toBeDefined();

@@ -46,25 +69,52 @@ describe('DockerModule', () => {
        expect(service).toHaveProperty('getDockerClient');
    });

    it('should provide DockerEventService', async () => {
        const module: TestingModule = await Test.createTestingModule({
            providers: [
                DockerEventService,
                { provide: DockerService, useValue: { getDockerClient: vi.fn() } },
            ],
        }).compile();

        const service = module.get<DockerEventService>(DockerEventService);
        expect(service).toBeInstanceOf(DockerEventService);
    });

    it('should provide DockerResolver', async () => {
        const module: TestingModule = await Test.createTestingModule({
            providers: [
                DockerResolver,
                { provide: DockerService, useValue: {} },
                { provide: DockerService, useValue: { clearContainerCache: vi.fn() } },
                {
                    provide: DockerConfigService,
                    useValue: {
                        defaultConfig: vi
                            .fn()
                            .mockReturnValue({ templateMappings: {}, skipTemplatePaths: [] }),
                        getConfig: vi
                            .fn()
                            .mockReturnValue({ templateMappings: {}, skipTemplatePaths: [] }),
                        validate: vi.fn().mockImplementation((config) => Promise.resolve(config)),
                        replaceConfig: vi.fn(),
                    },
                },
                { provide: DockerOrganizerService, useValue: {} },
                { provide: DockerPhpService, useValue: { getContainerUpdateStatuses: vi.fn() } },
                {
                    provide: DockerTemplateScannerService,
                    useValue: {
                        scanTemplates: vi.fn(),
                        syncMissingContainers: vi.fn(),
                    },
                },
                {
                    provide: DockerStatsService,
                    useValue: {
                        startStatsStream: vi.fn(),
                        stopStatsStream: vi.fn(),
                    },
                },
                {
                    provide: SubscriptionTrackerService,
                    useValue: {
                        registerTopic: vi.fn(),
                    },
                },
                {
                    provide: SubscriptionHelperService,
                    useValue: {
                        createTrackedSubscription: vi.fn(),
                    },
                },
            ],
        }).compile();

@@ -2,27 +2,44 @@ import { Module } from '@nestjs/common';

import { JobModule } from '@app/unraid-api/cron/job.module.js';
import { ContainerStatusJob } from '@app/unraid-api/graph/resolvers/docker/container-status.job.js';
import { DockerAutostartService } from '@app/unraid-api/graph/resolvers/docker/docker-autostart.service.js';
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerContainerResolver } from '@app/unraid-api/graph/resolvers/docker/docker-container.resolver.js';
import { DockerLogService } from '@app/unraid-api/graph/resolvers/docker/docker-log.service.js';
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
import { DockerNetworkService } from '@app/unraid-api/graph/resolvers/docker/docker-network.service.js';
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
import { DockerPortService } from '@app/unraid-api/graph/resolvers/docker/docker-port.service.js';
import { DockerStatsService } from '@app/unraid-api/graph/resolvers/docker/docker-stats.service.js';
import { DockerTailscaleService } from '@app/unraid-api/graph/resolvers/docker/docker-tailscale.service.js';
import { DockerTemplateIconService } from '@app/unraid-api/graph/resolvers/docker/docker-template-icon.service.js';
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
import { DockerMutationsResolver } from '@app/unraid-api/graph/resolvers/docker/docker.mutations.resolver.js';
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
import { NotificationsModule } from '@app/unraid-api/graph/resolvers/notifications/notifications.module.js';
import { ServicesModule } from '@app/unraid-api/graph/services/services.module.js';

@Module({
    imports: [JobModule],
    imports: [JobModule, NotificationsModule, ServicesModule],
    providers: [
        // Services
        DockerService,
        DockerAutostartService,
        DockerOrganizerConfigService,
        DockerOrganizerService,
        DockerManifestService,
        DockerPhpService,
        DockerConfigService,
        // DockerEventService,
        DockerTemplateScannerService,
        DockerTemplateIconService,
        DockerStatsService,
        DockerTailscaleService,
        DockerLogService,
        DockerNetworkService,
        DockerPortService,

        // Jobs
        ContainerStatusJob,

@@ -45,6 +45,7 @@ describe('DockerMutationsResolver', () => {
            state: ContainerState.RUNNING,
            status: 'Up 2 hours',
            names: ['test-container'],
            isOrphaned: false,
        };
        vi.mocked(dockerService.start).mockResolvedValue(mockContainer);

@@ -65,6 +66,7 @@ describe('DockerMutationsResolver', () => {
            state: ContainerState.EXITED,
            status: 'Exited',
            names: ['test-container'],
            isOrphaned: false,
        };
        vi.mocked(dockerService.stop).mockResolvedValue(mockContainer);

@@ -4,7 +4,11 @@ import { AuthAction, Resource } from '@unraid/shared/graphql.model.js';
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
import { UsePermissions } from '@unraid/shared/use-permissions.directive.js';

import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
import {
    DockerAutostartEntryInput,
    DockerContainer,
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { DockerMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js';

@@ -32,4 +36,86 @@ export class DockerMutationsResolver {
    public async stop(@Args('id', { type: () => PrefixedID }) id: string) {
        return this.dockerService.stop(id);
    }

    @ResolveField(() => DockerContainer, { description: 'Pause (Suspend) a container' })
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async pause(@Args('id', { type: () => PrefixedID }) id: string) {
        return this.dockerService.pause(id);
    }

    @ResolveField(() => DockerContainer, { description: 'Unpause (Resume) a container' })
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async unpause(@Args('id', { type: () => PrefixedID }) id: string) {
        return this.dockerService.unpause(id);
    }

    @ResolveField(() => Boolean, { description: 'Remove a container' })
    @UsePermissions({
        action: AuthAction.DELETE_ANY,
        resource: Resource.DOCKER,
    })
    public async removeContainer(
        @Args('id', { type: () => PrefixedID }) id: string,
        @Args('withImage', { type: () => Boolean, nullable: true }) withImage?: boolean
    ) {
        return this.dockerService.removeContainer(id, { withImage });
    }

    @ResolveField(() => Boolean, {
        description: 'Update auto-start configuration for Docker containers',
    })
    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async updateAutostartConfiguration(
        @Args('entries', { type: () => [DockerAutostartEntryInput] })
        entries: DockerAutostartEntryInput[],
        @Args('persistUserPreferences', { type: () => Boolean, nullable: true })
        persistUserPreferences?: boolean
    ) {
        await this.dockerService.updateAutostartConfiguration(entries, {
            persistUserPreferences,
        });
        return true;
    }

    @ResolveField(() => DockerContainer, { description: 'Update a container to the latest image' })
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async updateContainer(@Args('id', { type: () => PrefixedID }) id: string) {
        return this.dockerService.updateContainer(id);
    }

    @ResolveField(() => [DockerContainer], {
        description: 'Update multiple containers to the latest images',
    })
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async updateContainers(
        @Args('ids', { type: () => [PrefixedID] })
        ids: string[]
    ) {
        return this.dockerService.updateContainers(ids);
    }

    @ResolveField(() => [DockerContainer], {
        description: 'Update all containers that have available updates',
    })
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    public async updateAllContainers() {
        return this.dockerService.updateAllContainers();
    }
}

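Note: `removeContainer` accepts an optional `withImage` flag that also removes the backing image. A hedged example call, assuming the same `docker` mutations nesting as the resolver above; the id is a placeholder.

mutation {
    docker {
        removeContainer(id: "<container-id>", withImage: true)
    }
}
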
@@ -3,11 +3,20 @@ import { Test } from '@nestjs/testing';

import { beforeEach, describe, expect, it, vi } from 'vitest';

import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
import { ContainerState, DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerStatsService } from '@app/unraid-api/graph/resolvers/docker/docker-stats.service.js';
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
import {
    ContainerState,
    DockerContainer,
    DockerContainerLogs,
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
import { SubscriptionHelperService } from '@app/unraid-api/graph/services/subscription-helper.service.js';
import { SubscriptionTrackerService } from '@app/unraid-api/graph/services/subscription-tracker.service.js';
import { GraphQLFieldHelper } from '@app/unraid-api/utils/graphql-field-helper.js';

vi.mock('@app/unraid-api/utils/graphql-field-helper.js', () => ({
@@ -29,6 +38,22 @@ describe('DockerResolver', () => {
                useValue: {
                    getContainers: vi.fn(),
                    getNetworks: vi.fn(),
                    getContainerLogSizes: vi.fn(),
                    getContainerLogs: vi.fn(),
                    clearContainerCache: vi.fn(),
                },
            },
            {
                provide: DockerConfigService,
                useValue: {
                    defaultConfig: vi
                        .fn()
                        .mockReturnValue({ templateMappings: {}, skipTemplatePaths: [] }),
                    getConfig: vi
                        .fn()
                        .mockReturnValue({ templateMappings: {}, skipTemplatePaths: [] }),
                    validate: vi.fn().mockImplementation((config) => Promise.resolve(config)),
                    replaceConfig: vi.fn(),
                },
            },
            {
@@ -43,6 +68,39 @@ describe('DockerResolver', () => {
                    getContainerUpdateStatuses: vi.fn(),
                },
            },
            {
                provide: DockerTemplateScannerService,
                useValue: {
                    scanTemplates: vi.fn().mockResolvedValue({
                        scanned: 0,
                        matched: 0,
                        skipped: 0,
                        errors: [],
                    }),
                    syncMissingContainers: vi.fn().mockResolvedValue(false),
                },
            },
            {
                provide: DockerStatsService,
                useValue: {
                    startStatsStream: vi.fn(),
                    stopStatsStream: vi.fn(),
                },
            },
            {
                provide: SubscriptionTrackerService,
                useValue: {
                    registerTopic: vi.fn(),
                    subscribe: vi.fn(),
                    unsubscribe: vi.fn(),
                },
            },
            {
                provide: SubscriptionHelperService,
                useValue: {
                    createTrackedSubscription: vi.fn(),
                },
            },
        ],
    }).compile();

@@ -51,6 +109,8 @@ describe('DockerResolver', () => {

        // Reset mocks before each test
        vi.clearAllMocks();
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation(() => false);
        vi.mocked(dockerService.getContainerLogSizes).mockResolvedValue(new Map());
    });

    it('should be defined', () => {
@@ -75,6 +135,7 @@ describe('DockerResolver', () => {
                ports: [],
                state: ContainerState.EXITED,
                status: 'Exited',
                isOrphaned: false,
            },
            {
                id: '2',
@@ -87,16 +148,19 @@ describe('DockerResolver', () => {
                ports: [],
                state: ContainerState.RUNNING,
                status: 'Up 2 hours',
                isOrphaned: false,
            },
        ];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockReturnValue(false);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation(() => false);

        const mockInfo = {} as any;

        const result = await resolver.containers(false, mockInfo);
        expect(result).toEqual(mockContainers);
        expect(GraphQLFieldHelper.isFieldRequested).toHaveBeenCalledWith(mockInfo, 'sizeRootFs');
        expect(GraphQLFieldHelper.isFieldRequested).toHaveBeenCalledWith(mockInfo, 'sizeRw');
        expect(GraphQLFieldHelper.isFieldRequested).toHaveBeenCalledWith(mockInfo, 'sizeLog');
        expect(dockerService.getContainers).toHaveBeenCalledWith({ skipCache: false, size: false });
    });

@@ -114,10 +178,13 @@ describe('DockerResolver', () => {
                sizeRootFs: 1024000,
                state: ContainerState.EXITED,
                status: 'Exited',
                isOrphaned: false,
            },
        ];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockReturnValue(true);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation((_, field) => {
            return field === 'sizeRootFs';
        });

        const mockInfo = {} as any;

@@ -127,10 +194,61 @@ describe('DockerResolver', () => {
        expect(dockerService.getContainers).toHaveBeenCalledWith({ skipCache: false, size: true });
    });

    it('should request size when sizeRw field is requested', async () => {
        const mockContainers: DockerContainer[] = [];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation((_, field) => {
            return field === 'sizeRw';
        });

        const mockInfo = {} as any;

        await resolver.containers(false, mockInfo);
        expect(GraphQLFieldHelper.isFieldRequested).toHaveBeenCalledWith(mockInfo, 'sizeRw');
        expect(dockerService.getContainers).toHaveBeenCalledWith({ skipCache: false, size: true });
    });

    it('should fetch log sizes when sizeLog field is requested', async () => {
        const mockContainers: DockerContainer[] = [
            {
                id: '1',
                autoStart: false,
                command: 'test',
                names: ['/test-container'],
                created: 1234567890,
                image: 'test-image',
                imageId: 'test-image-id',
                ports: [],
                state: ContainerState.EXITED,
                status: 'Exited',
                isOrphaned: false,
            },
        ];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation((_, field) => {
            if (field === 'sizeLog') return true;
            return false;
        });

        const logSizeMap = new Map<string, number>([['test-container', 42]]);
        vi.mocked(dockerService.getContainerLogSizes).mockResolvedValue(logSizeMap);

        const mockInfo = {} as any;

        const result = await resolver.containers(false, mockInfo);

        expect(GraphQLFieldHelper.isFieldRequested).toHaveBeenCalledWith(mockInfo, 'sizeLog');
        expect(dockerService.getContainerLogSizes).toHaveBeenCalledWith(['test-container']);
        expect(result[0]?.sizeLog).toBe(42);
        expect(dockerService.getContainers).toHaveBeenCalledWith({ skipCache: false, size: false });
    });

    it('should request size when GraphQLFieldHelper indicates sizeRootFs is requested', async () => {
        const mockContainers: DockerContainer[] = [];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockReturnValue(true);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation((_, field) => {
            return field === 'sizeRootFs';
        });

        const mockInfo = {} as any;

@@ -142,7 +260,7 @@ describe('DockerResolver', () => {
    it('should not request size when GraphQLFieldHelper indicates sizeRootFs is not requested', async () => {
        const mockContainers: DockerContainer[] = [];
        vi.mocked(dockerService.getContainers).mockResolvedValue(mockContainers);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockReturnValue(false);
        vi.mocked(GraphQLFieldHelper.isFieldRequested).mockImplementation(() => false);

        const mockInfo = {} as any;

@@ -161,4 +279,22 @@ describe('DockerResolver', () => {
        await resolver.containers(true, mockInfo);
        expect(dockerService.getContainers).toHaveBeenCalledWith({ skipCache: true, size: false });
    });

    it('should fetch container logs with provided arguments', async () => {
        const since = new Date('2024-01-01T00:00:00.000Z');
        const logResult: DockerContainerLogs = {
            containerId: '1',
            lines: [],
            cursor: since,
        };
        vi.mocked(dockerService.getContainerLogs).mockResolvedValue(logResult);

        const result = await resolver.logs('1', since, 25);

        expect(result).toEqual(logResult);
        expect(dockerService.getContainerLogs).toHaveBeenCalledWith('1', {
            since,
            tail: 25,
        });
    });
});

@@ -1,19 +1,41 @@
import { Args, Info, Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql';
import {
    Args,
    GraphQLISODateTime,
    Info,
    Int,
    Mutation,
    Query,
    ResolveField,
    Resolver,
    Subscription,
} from '@nestjs/graphql';

import type { GraphQLResolveInfo } from 'graphql';
import { AuthAction, Resource } from '@unraid/shared/graphql.model.js';
import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
import { UsePermissions } from '@unraid/shared/use-permissions.directive.js';
import { GraphQLJSON } from 'graphql-scalars';

import { PUBSUB_CHANNEL } from '@app/core/pubsub.js';
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
import { DockerStatsService } from '@app/unraid-api/graph/resolvers/docker/docker-stats.service.js';
import { DockerTemplateSyncResult } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.model.js';
import { DockerTemplateScannerService } from '@app/unraid-api/graph/resolvers/docker/docker-template-scanner.service.js';
import { ExplicitStatusItem } from '@app/unraid-api/graph/resolvers/docker/docker-update-status.model.js';
import {
    Docker,
    DockerContainer,
    DockerContainerLogs,
    DockerContainerStats,
    DockerNetwork,
    DockerPortConflicts,
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
import { SubscriptionHelperService } from '@app/unraid-api/graph/services/subscription-helper.service.js';
import { SubscriptionTrackerService } from '@app/unraid-api/graph/services/subscription-tracker.service.js';
import { DEFAULT_ORGANIZER_ROOT_ID } from '@app/unraid-api/organizer/organizer.js';
import { ResolvedOrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
import { GraphQLFieldHelper } from '@app/unraid-api/utils/graphql-field-helper.js';
@@ -22,9 +44,20 @@ import { GraphQLFieldHelper } from '@app/unraid-api/utils/graphql-field-helper.j
export class DockerResolver {
    constructor(
        private readonly dockerService: DockerService,
        private readonly dockerConfigService: DockerConfigService,
        private readonly dockerOrganizerService: DockerOrganizerService,
        private readonly dockerPhpService: DockerPhpService
    ) {}
        private readonly dockerPhpService: DockerPhpService,
        private readonly dockerTemplateScannerService: DockerTemplateScannerService,
        private readonly dockerStatsService: DockerStatsService,
        private readonly subscriptionTracker: SubscriptionTrackerService,
        private readonly subscriptionHelper: SubscriptionHelperService
    ) {
        this.subscriptionTracker.registerTopic(
            PUBSUB_CHANNEL.DOCKER_STATS,
            () => this.dockerStatsService.startStatsStream(),
            () => this.dockerStatsService.stopStatsStream()
        );
    }

    @UsePermissions({
        action: AuthAction.READ_ANY,
@@ -37,6 +70,17 @@ export class DockerResolver {
        };
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => DockerContainer, { nullable: true })
    public async container(@Args('id', { type: () => PrefixedID }) id: string) {
        const containers = await this.dockerService.getContainers({ skipCache: false });
        return containers.find((c) => c.id === id) ?? null;
    }

    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
@@ -46,8 +90,47 @@ export class DockerResolver {
        @Args('skipCache', { defaultValue: false, type: () => Boolean }) skipCache: boolean,
        @Info() info: GraphQLResolveInfo
    ) {
        const requestsSize = GraphQLFieldHelper.isFieldRequested(info, 'sizeRootFs');
        return this.dockerService.getContainers({ skipCache, size: requestsSize });
        const requestsRootFsSize = GraphQLFieldHelper.isFieldRequested(info, 'sizeRootFs');
        const requestsRwSize = GraphQLFieldHelper.isFieldRequested(info, 'sizeRw');
        const requestsLogSize = GraphQLFieldHelper.isFieldRequested(info, 'sizeLog');
        const containers = await this.dockerService.getContainers({
            skipCache,
            size: requestsRootFsSize || requestsRwSize,
        });

        if (requestsLogSize) {
            const names = Array.from(
                new Set(
                    containers
                        .map((container) => container.names?.[0]?.replace(/^\//, '') || null)
                        .filter((name): name is string => Boolean(name))
                )
            );
            const logSizes = await this.dockerService.getContainerLogSizes(names);
            containers.forEach((container) => {
                const normalized = container.names?.[0]?.replace(/^\//, '') || '';
                container.sizeLog = normalized ? (logSizes.get(normalized) ?? 0) : 0;
            });
        }

        const wasSynced = await this.dockerTemplateScannerService.syncMissingContainers(containers);
        return wasSynced ? await this.dockerService.getContainers({ skipCache: true }) : containers;
    }

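Note: which size fields a query selects determines the extra work this resolver performs: `sizeRootFs` or `sizeRw` enables Docker's size computation, while `sizeLog` triggers the per-container log-size lookup shown above. A hedged example that exercises only the log-size path (assuming the `docker` query field):

query {
    docker {
        containers {
            names
            sizeLog
        }
    }
}
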
    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => DockerContainerLogs)
    public async logs(
        @Args('id', { type: () => PrefixedID }) id: string,
        @Args('since', { type: () => GraphQLISODateTime, nullable: true }) since?: Date | null,
        @Args('tail', { type: () => Int, nullable: true }) tail?: number | null
    ) {
        return this.dockerService.getContainerLogs(id, {
            since: since ?? undefined,
            tail,
        });
    }

    @UsePermissions({
@@ -61,14 +144,27 @@ export class DockerResolver {
        return this.dockerService.getNetworks({ skipCache });
    }

    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => DockerPortConflicts)
    public async portConflicts(
        @Args('skipCache', { defaultValue: false, type: () => Boolean }) skipCache: boolean
    ) {
        return this.dockerService.getPortConflicts({ skipCache });
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => ResolvedOrganizerV1)
    public async organizer() {
        return this.dockerOrganizerService.resolveOrganizer();
    public async organizer(
        @Args('skipCache', { defaultValue: false, type: () => Boolean }) skipCache: boolean
    ) {
        return this.dockerOrganizerService.resolveOrganizer(undefined, { skipCache });
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
@@ -107,6 +203,11 @@ export class DockerResolver {
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    /**
     * Deletes organizer entries (folders). When a folder is deleted, its container
     * children are automatically appended to the end of the root folder via
     * `addMissingResourcesToView`. Containers are never permanently deleted by this operation.
     */
    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
@@ -137,6 +238,80 @@ export class DockerResolver {
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => ResolvedOrganizerV1)
    public async moveDockerItemsToPosition(
        @Args('sourceEntryIds', { type: () => [String] }) sourceEntryIds: string[],
        @Args('destinationFolderId') destinationFolderId: string,
        @Args('position', { type: () => Number }) position: number
    ) {
        const organizer = await this.dockerOrganizerService.moveItemsToPosition({
            sourceEntryIds,
            destinationFolderId,
            position,
        });
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => ResolvedOrganizerV1)
    public async renameDockerFolder(
        @Args('folderId') folderId: string,
        @Args('newName') newName: string
    ) {
        const organizer = await this.dockerOrganizerService.renameFolderById({
            folderId,
            newName,
        });
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => ResolvedOrganizerV1)
    public async createDockerFolderWithItems(
        @Args('name') name: string,
        @Args('parentId', { nullable: true }) parentId?: string,
        @Args('sourceEntryIds', { type: () => [String], nullable: true }) sourceEntryIds?: string[],
        @Args('position', { type: () => Number, nullable: true }) position?: number
    ) {
        const organizer = await this.dockerOrganizerService.createFolderWithItems({
            name,
            parentId: parentId ?? DEFAULT_ORGANIZER_ROOT_ID,
            sourceEntryIds: sourceEntryIds ?? [],
            position,
        });
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => ResolvedOrganizerV1)
    public async updateDockerViewPreferences(
        @Args('viewId', { nullable: true, defaultValue: 'default' }) viewId: string,
        @Args('prefs', { type: () => GraphQLJSON }) prefs: Record<string, unknown>
    ) {
        const organizer = await this.dockerOrganizerService.updateViewPreferences({
            viewId,
            prefs,
        });
        return this.dockerOrganizerService.resolveOrganizer(organizer);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.READ_ANY,
@@ -146,4 +321,48 @@ export class DockerResolver {
    public async containerUpdateStatuses() {
        return this.dockerPhpService.getContainerUpdateStatuses();
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => DockerTemplateSyncResult)
    public async syncDockerTemplatePaths() {
        return this.dockerTemplateScannerService.scanTemplates();
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => Boolean, {
        description:
            'Reset Docker template mappings to defaults. Use this to recover from corrupted state.',
    })
    public async resetDockerTemplateMappings(): Promise<boolean> {
        const defaultConfig = this.dockerConfigService.defaultConfig();
        const currentConfig = this.dockerConfigService.getConfig();
        const resetConfig = {
            ...currentConfig,
            templateMappings: defaultConfig.templateMappings,
            skipTemplatePaths: defaultConfig.skipTemplatePaths,
        };
        const validated = await this.dockerConfigService.validate(resetConfig);
        this.dockerConfigService.replaceConfig(validated);
        await this.dockerService.clearContainerCache();
        return true;
    }

    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @Subscription(() => DockerContainerStats, {
        resolve: (payload) => payload.dockerContainerStats,
    })
    public dockerContainerStats() {
        return this.subscriptionHelper.createTrackedSubscription(PUBSUB_CHANNEL.DOCKER_STATS);
    }
}

@@ -0,0 +1,169 @@
import { CACHE_MANAGER } from '@nestjs/cache-manager';
import { Test, TestingModule } from '@nestjs/testing';
import { mkdtemp, readFile, rm } from 'fs/promises';
import { tmpdir } from 'os';
import { join } from 'path';

import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest';

import { DockerAutostartService } from '@app/unraid-api/graph/resolvers/docker/docker-autostart.service.js';
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerLogService } from '@app/unraid-api/graph/resolvers/docker/docker-log.service.js';
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
import { DockerNetworkService } from '@app/unraid-api/graph/resolvers/docker/docker-network.service.js';
import { DockerPortService } from '@app/unraid-api/graph/resolvers/docker/docker-port.service.js';
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
import { NotificationsService } from '@app/unraid-api/graph/resolvers/notifications/notifications.service.js';

// Mock dependencies that are not the focus of this integration test
const mockNotificationsService = {
    notifyIfUnique: vi.fn(),
};

const mockDockerConfigService = {
    getConfig: vi.fn().mockReturnValue({ templateMappings: {} }),
};

const mockDockerManifestService = {
    getCachedUpdateStatuses: vi.fn().mockResolvedValue({}),
    isUpdateAvailableCached: vi.fn().mockResolvedValue(false),
};

const mockCacheManager = {
    get: vi.fn(),
    set: vi.fn(),
    del: vi.fn(),
};

// Hoisted mock for paths
const { mockPaths } = vi.hoisted(() => ({
    mockPaths: {
        'docker-autostart': '',
        'docker-userprefs': '',
        'docker-socket': '/var/run/docker.sock',
    },
}));

vi.mock('@app/store/index.js', () => ({
    getters: {
        paths: () => mockPaths,
        emhttp: () => ({ networks: [] }),
    },
}));

// Check for Docker availability
let dockerAvailable = false;
try {
    const Docker = (await import('dockerode')).default;
    const docker = new Docker({ socketPath: '/var/run/docker.sock' });
    await docker.ping();
    dockerAvailable = true;
} catch {
    console.warn('Docker not available or not accessible at /var/run/docker.sock');
}

describe.runIf(dockerAvailable)('DockerService Integration', () => {
    let service: DockerService;
    let autostartService: DockerAutostartService;
    let module: TestingModule;
    let tempDir: string;

    beforeAll(async () => {
        // Set up a temp dir for config files
        tempDir = await mkdtemp(join(tmpdir(), 'unraid-api-docker-test-'));
        mockPaths['docker-autostart'] = join(tempDir, 'docker-autostart');
        mockPaths['docker-userprefs'] = join(tempDir, 'docker-userprefs');

        module = await Test.createTestingModule({
            providers: [
                DockerService,
                DockerAutostartService,
                DockerLogService,
                DockerNetworkService,
                DockerPortService,
                { provide: CACHE_MANAGER, useValue: mockCacheManager },
                { provide: DockerConfigService, useValue: mockDockerConfigService },
                { provide: DockerManifestService, useValue: mockDockerManifestService },
                { provide: NotificationsService, useValue: mockNotificationsService },
            ],
        }).compile();

        service = module.get<DockerService>(DockerService);
        autostartService = module.get<DockerAutostartService>(DockerAutostartService);
    });

    afterAll(async () => {
        if (tempDir) {
            await rm(tempDir, { recursive: true, force: true });
        }
    });

    it('should fetch containers from docker daemon', async () => {
        const containers = await service.getContainers({ skipCache: true });
        expect(Array.isArray(containers)).toBe(true);
        if (containers.length > 0) {
            expect(containers[0]).toHaveProperty('id');
            expect(containers[0]).toHaveProperty('names');
            expect(containers[0].state).toBeDefined();
        }
    });

    it('should fetch networks from docker daemon', async () => {
        const networks = await service.getNetworks({ skipCache: true });
        expect(Array.isArray(networks)).toBe(true);
        // Default networks (bridge, host, null) should always exist
        expect(networks.length).toBeGreaterThan(0);
        const bridge = networks.find((n) => n.name === 'bridge');
        expect(bridge).toBeDefined();
    });

    it('should manage autostart configuration in temp files', async () => {
        const containers = await service.getContainers({ skipCache: true });
        if (containers.length === 0) {
            console.warn('No containers found, skipping autostart write test');
            return;
        }

        const target = containers[0];
        // Ensure the name is valid for the autostart file (strip leading /)
        const primaryName = autostartService.getContainerPrimaryName(target as any);
        expect(primaryName).toBeTruthy();

        const entry = {
            id: target.id,
            autoStart: true,
            wait: 10,
        };

        await service.updateAutostartConfiguration([entry], { persistUserPreferences: true });

        // Verify file content
        try {
            const content = await readFile(mockPaths['docker-autostart'], 'utf8');
            expect(content).toContain(primaryName);
            expect(content).toContain('10');
        } catch (error: any) {
            // If the file doesn't exist, the write logic may have skipped it (e.g. a name issue),
            // but we expect a write when the container exists and the entry is valid.
            throw new Error(`Failed to read autostart file: ${error.message}`);
        }
    });

    it('should get container logs using dockerode', async () => {
        const containers = await service.getContainers({ skipCache: true });
        const running = containers.find((c) => c.state === 'RUNNING'); // Enum value is the string 'RUNNING'

        if (!running) {
            console.warn('No running containers found, skipping log test');
            return;
        }

        // This test verifies that the execa -> dockerode switch works for logs.
        // If it fails, the log parsing or dockerode interaction is likely wrong.
        const logs = await service.getContainerLogs(running.id, { tail: 10 });
        expect(logs).toBeDefined();
        expect(logs.containerId).toBe(running.id);
        expect(Array.isArray(logs.lines)).toBe(true);
        // We can't guarantee lines length > 0 if the container is silent, but it shouldn't throw.
    });
});
