Mirror of https://github.com/unraid/api.git (synced 2026-01-02 22:50:02 -06:00)

Compare commits: 22 commits, 4.19.1-bui...v4.21.0
| Author | SHA1 | Date |
|---|---|---|
| | 88a924c84f | |
| | ae4d3ecbc4 | |
| | c569043ab5 | |
| | 50ea2a3ffb | |
| | b518131406 | |
| | e57d81e073 | |
| | 88baddd6c0 | |
| | abc22bdb87 | |
| | 6ed2f5ce8e | |
| | b79b44e95c | |
| | ca22285a26 | |
| | 838be2c52e | |
| | 73c1100d0b | |
| | 434e331384 | |
| | a27453fda8 | |
| | 98e6058cd8 | |
| | 6c2c51ae1d | |
| | d10c12035e | |
| | 5dd6f42550 | |
| | 4759b3d0b3 | |
| | daeeba8c1f | |
| | 196bd52628 | |
@@ -1,123 +1,3 @@
{
  "permissions": {
    "allow": [
      "# Development Commands",
      "Bash(pnpm install)",
      "Bash(pnpm dev)",
      "Bash(pnpm build)",
      "Bash(pnpm test)",
      "Bash(pnpm test:*)",
      "Bash(pnpm lint)",
      "Bash(pnpm lint:fix)",
      "Bash(pnpm type-check)",
      "Bash(pnpm codegen)",
      "Bash(pnpm storybook)",
      "Bash(pnpm --filter * dev)",
      "Bash(pnpm --filter * build)",
      "Bash(pnpm --filter * test)",
      "Bash(pnpm --filter * lint)",
      "Bash(pnpm --filter * codegen)",

      "# Git Commands (read-only)",
      "Bash(git status)",
      "Bash(git diff)",
      "Bash(git log)",
      "Bash(git branch)",
      "Bash(git remote -v)",

      "# Search Commands",
      "Bash(rg *)",

      "# File System (read-only)",
      "Bash(ls)",
      "Bash(ls -la)",
      "Bash(pwd)",
      "Bash(find . -name)",
      "Bash(find . -type)",

      "# Node/NPM Commands",
      "Bash(node --version)",
      "Bash(pnpm --version)",
      "Bash(npx --version)",

      "# Environment Commands",
      "Bash(echo $*)",
      "Bash(which *)",

      "# Process Commands",
      "Bash(ps aux | grep)",
      "Bash(lsof -i)",

      "# Documentation Domains",
      "WebFetch(domain:tailwindcss.com)",
      "WebFetch(domain:github.com)",
      "WebFetch(domain:reka-ui.com)",
      "WebFetch(domain:nodejs.org)",
      "WebFetch(domain:pnpm.io)",
      "WebFetch(domain:vitejs.dev)",
      "WebFetch(domain:nuxt.com)",
      "WebFetch(domain:nestjs.com)",

      "# IDE Integration",
      "mcp__ide__getDiagnostics",

      "# Browser MCP (for testing)",
      "mcp__browsermcp__browser_navigate",
      "mcp__browsermcp__browser_click",
      "mcp__browsermcp__browser_screenshot"
    ],
    "deny": [
      "# Dangerous Commands",
      "Bash(rm -rf)",
      "Bash(chmod 777)",
      "Bash(curl)",
      "Bash(wget)",
      "Bash(ssh)",
      "Bash(scp)",
      "Bash(sudo)",
      "Bash(su)",
      "Bash(pkill)",
      "Bash(kill)",
      "Bash(killall)",
      "Bash(python)",
      "Bash(python3)",
      "Bash(pip)",
      "Bash(npm)",
      "Bash(yarn)",
      "Bash(apt)",
      "Bash(brew)",
      "Bash(systemctl)",
      "Bash(service)",
      "Bash(docker)",
      "Bash(docker-compose)",

      "# File Modification (use Edit/Write tools instead)",
      "Bash(sed)",
      "Bash(awk)",
      "Bash(perl)",
      "Bash(echo > *)",
      "Bash(echo >> *)",
      "Bash(cat > *)",
      "Bash(cat >> *)",
      "Bash(tee)",

      "# Git Write Commands (require explicit user action)",
      "Bash(git add)",
      "Bash(git commit)",
      "Bash(git push)",
      "Bash(git pull)",
      "Bash(git merge)",
      "Bash(git rebase)",
      "Bash(git checkout)",
      "Bash(git reset)",
      "Bash(git clean)",

      "# Package Management Write Commands",
      "Bash(pnpm add)",
      "Bash(pnpm remove)",
      "Bash(pnpm update)",
      "Bash(pnpm upgrade)"
    ]
  },
  "enableAllProjectMcpServers": false
  "permissions": {}
}
37 .github/workflows/build-plugin.yml (vendored)

@@ -183,3 +183,40 @@ jobs:
          ```
          ${{ inputs.BASE_URL }}/tag/${{ inputs.TAG }}/dynamix.unraid.net.plg
          ```

      - name: Clean up old preview builds
        if: inputs.RELEASE_CREATED == 'false' && github.event_name == 'push'
        continue-on-error: true
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.CF_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.CF_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
        run: |
          echo "🧹 Cleaning up old preview builds (keeping last 7 days)..."

          # Calculate cutoff date (7 days ago)
          CUTOFF_DATE=$(date -d "7 days ago" +"%Y.%m.%d")
          echo "Deleting builds older than: ${CUTOFF_DATE}"

          # List and delete old timestamped .txz files
          OLD_FILES=$(aws s3 ls "s3://${{ secrets.CF_BUCKET_PREVIEW }}/unraid-api/" \
            --endpoint-url ${{ secrets.CF_ENDPOINT }} --recursive | \
            grep -E "dynamix\.unraid\.net-[0-9]{4}\.[0-9]{2}\.[0-9]{2}\.[0-9]{4}\.txz" | \
            awk '{print $4}' || true)

          DELETED_COUNT=0
          if [ -n "$OLD_FILES" ]; then
            while IFS= read -r file; do
              if [[ $file =~ ([0-9]{4}\.[0-9]{2}\.[0-9]{2})\.[0-9]{4}\.txz ]]; then
                FILE_DATE="${BASH_REMATCH[1]}"
                if [[ "$FILE_DATE" < "$CUTOFF_DATE" ]]; then
                  echo "Deleting old build: $(basename "$file")"
                  aws s3 rm "s3://${{ secrets.CF_BUCKET_PREVIEW }}/${file}" \
                    --endpoint-url ${{ secrets.CF_ENDPOINT }} || true
                  ((DELETED_COUNT++))
                fi
              fi
            done <<< "$OLD_FILES"
          fi

          echo "✅ Deleted ${DELETED_COUNT} old builds"
2 .github/workflows/deploy-storybook.yml (vendored)

@@ -65,7 +65,7 @@ jobs:

      - name: Comment PR with deployment URL
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        uses: actions/github-script@v8
        with:
          script: |
            github.rest.issues.createComment({
47 .github/workflows/main.yml (vendored)

@@ -8,27 +8,9 @@ on:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
  release-please:
    name: Release Please
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        # Only run release-please on pushes to main
        if: github.event_name == 'push' && github.ref == 'refs/heads/main'

      - id: release
        uses: googleapis/release-please-action@v4
        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    outputs:
      releases_created: ${{ steps.release.outputs.releases_created || 'false' }}
      tag_name: ${{ steps.release.outputs.tag_name || '' }}
  test-api:
    name: Test API
    defaults:
@@ -386,10 +368,32 @@ jobs:
          name: unraid-wc-rich
          path: web/dist

  release-please:
    name: Release Please
    runs-on: ubuntu-latest
    # Only run on pushes to main AND after tests pass
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    needs:
      - test-api
      - build-api
      - build-web
      - build-unraid-ui-webcomponents
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout
        uses: actions/checkout@v5

      - id: release
        uses: googleapis/release-please-action@v4
    outputs:
      releases_created: ${{ steps.release.outputs.releases_created || 'false' }}
      tag_name: ${{ steps.release.outputs.tag_name || '' }}

  build-plugin-staging-pr:
    name: Build and Deploy Plugin
    needs:
      - release-please
      - build-api
      - build-web
      - build-unraid-ui-webcomponents
@@ -413,9 +417,6 @@ jobs:
    needs:
      - release-please
      - build-api
      - build-web
      - build-unraid-ui-webcomponents
      - test-api
    uses: ./.github/workflows/build-plugin.yml
    with:
      RELEASE_CREATED: true
95 .github/workflows/push-staging-pr-on-close.yml (vendored)

@@ -1,4 +1,9 @@
name: Push Staging Plugin on PR Close
name: Replace PR Plugin with Staging Redirect on Merge

# This workflow runs when a PR is merged and replaces the PR-specific plugin
# with a redirect version that points to the main staging URL.
# This ensures users who installed the PR version will automatically
# update to the staging version on their next update check.

on:
  pull_request:
@@ -17,7 +22,7 @@ on:
        default: true

jobs:
  push-staging:
  push-staging-redirect:
    if: (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || (github.event_name == 'workflow_dispatch' && inputs.pr_merged == true)
    runs-on: ubuntu-latest
    permissions:
@@ -45,11 +50,12 @@ jobs:
          name: unraid-plugin-.*
          path: connect-files
          pr: ${{ steps.pr_number.outputs.pr_number }}
          workflow: main.yml
          workflow_conclusion: success
          workflow_search: true
          search_artifacts: true
          if_no_artifact_found: fail

      - name: Update Downloaded Staging Plugin to New Date
      - name: Update Downloaded Plugin to Redirect to Staging
        run: |
          # Find the .plg file in the downloaded artifact
          plgfile=$(find connect-files -name "*.plg" -type f | head -1)
@@ -60,23 +66,82 @@
          fi

          echo "Found plugin file: $plgfile"
          version=$(date +"%Y.%m.%d.%H%M")
          sed -i -E "s#(<!ENTITY version \").*(\">)#\1${version}\2#g" "${plgfile}" || exit 1

          # Get current version and bump it with current timestamp
          current_version=$(grep '<!ENTITY version' "${plgfile}" | sed -E 's/.*"(.*)".*/\1/')
          echo "Current version: ${current_version}"

          # Create new version with current timestamp (ensures it's newer)
          new_version=$(date +"%Y.%m.%d.%H%M")
          echo "New redirect version: ${new_version}"

          # Update version to trigger update
          sed -i -E "s#(<!ENTITY version \").*(\">)#\1${new_version}\2#g" "${plgfile}" || exit 1

          # Change the plugin url to point to staging
          # Change the plugin url to point to staging - users will switch to staging on next update
          url="https://preview.dl.unraid.net/unraid-api/dynamix.unraid.net.plg"
          sed -i -E "s#(<!ENTITY plugin_url \").*?(\">)#\1${url}\2#g" "${plgfile}" || exit 1
          cat "${plgfile}"

          echo "Modified plugin to redirect to: ${url}"
          echo "Version bumped from ${current_version} to ${new_version}"

          mkdir -p pr-release
          mv "${plgfile}" pr-release/dynamix.unraid.net.plg

      - name: Upload to Cloudflare
        uses: jakejarvis/s3-sync-action@v0.5.1
      - name: Clean up old PR artifacts from Cloudflare
        env:
          AWS_S3_ENDPOINT: ${{ secrets.CF_ENDPOINT }}
          AWS_S3_BUCKET: ${{ secrets.CF_BUCKET_PREVIEW }}
          AWS_ACCESS_KEY_ID: ${{ secrets.CF_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.CF_SECRET_ACCESS_KEY }}
          AWS_REGION: "auto"
          SOURCE_DIR: pr-release
          DEST_DIR: unraid-api/tag/PR${{ steps.pr_number.outputs.pr_number }}
          AWS_DEFAULT_REGION: auto
        run: |
          # Delete all existing files in the PR directory first (txz, plg, etc.)
          aws s3 rm s3://${{ secrets.CF_BUCKET_PREVIEW }}/unraid-api/tag/PR${{ steps.pr_number.outputs.pr_number }}/ \
            --recursive \
            --endpoint-url ${{ secrets.CF_ENDPOINT }}

          echo "✅ Cleaned up old PR artifacts"

      - name: Upload PR Redirect Plugin to Cloudflare
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.CF_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.CF_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
        run: |
          # Upload only the redirect plugin file
          aws s3 cp pr-release/dynamix.unraid.net.plg \
            s3://${{ secrets.CF_BUCKET_PREVIEW }}/unraid-api/tag/PR${{ steps.pr_number.outputs.pr_number }}/dynamix.unraid.net.plg \
            --endpoint-url ${{ secrets.CF_ENDPOINT }} \
            --content-encoding none \
            --acl public-read

          echo "✅ Uploaded redirect plugin"

      - name: Output redirect information
        run: |
          echo "✅ PR plugin replaced with staging redirect version"
          echo "PR URL remains: https://preview.dl.unraid.net/unraid-api/tag/PR${{ steps.pr_number.outputs.pr_number }}/dynamix.unraid.net.plg"
          echo "Redirects users to staging: https://preview.dl.unraid.net/unraid-api/dynamix.unraid.net.plg"
          echo "Users updating from this PR version will automatically switch to staging"

      - name: Comment on PR about staging redirect
        if: github.event_name == 'pull_request'
        uses: thollander/actions-comment-pull-request@v3
        with:
          comment-tag: pr-closed-staging
          mode: recreate
          message: |
            ## 🔄 PR Merged - Plugin Redirected to Staging

            This PR has been merged and the preview plugin has been updated to redirect to the staging version.

            **For users testing this PR:**
            - Your plugin will automatically update to the staging version on the next update check
            - The staging version includes all merged changes from this PR
            - No manual intervention required

            **Staging URL:**
            ```
            https://preview.dl.unraid.net/unraid-api/dynamix.unraid.net.plg
            ```

            Thank you for testing! 🚀
2 .github/workflows/release-production.yml (vendored)

@@ -37,7 +37,7 @@ jobs:
          EOF
      - run: npm install html-escaper@2 xml2js
      - name: Update Plugin Changelog
        uses: actions/github-script@v7
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
@@ -1 +1 @@
{".":"4.19.1"}
{".":"4.21.0"}
@@ -75,18 +75,19 @@
/*
 * Dynamic color variables for user overrides from GraphQL
 * These are set via JavaScript and override the theme defaults
 * Using :root with class for higher specificity to override theme classes
 */
.has-custom-header-text {
:root.has-custom-header-text {
  --header-text-primary: var(--custom-header-text-primary);
  --color-header-text-primary: var(--custom-header-text-primary);
}

.has-custom-header-meta {
:root.has-custom-header-meta {
  --header-text-secondary: var(--custom-header-text-secondary);
  --color-header-text-secondary: var(--custom-header-text-secondary);
}

.has-custom-header-bg {
:root.has-custom-header-bg {
  --header-background-color: var(--custom-header-background-color);
  --color-header-background: var(--custom-header-background-color);
  --header-gradient-start: var(--custom-header-gradient-start);
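The rules above only take effect once the web UI sets the corresponding custom property and adds the matching class on the root element. A minimal TypeScript sketch of that runtime side, assuming the class and variable names from the CSS above (the helper function itself is illustrative and not part of this diff):

```typescript
// Illustrative helper: apply a GraphQL-provided header background override.
// Setting the custom property and adding the class on <html> lets the
// ":root.has-custom-header-bg" rule above win over the theme defaults.
export function applyCustomHeaderBackground(color: string): void {
    const root = document.documentElement;
    root.style.setProperty('--custom-header-background-color', color);
    root.classList.add('has-custom-header-bg');
}
```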
@@ -31,3 +31,4 @@ BYPASS_CORS_CHECKS=true
CHOKIDAR_USEPOLLING=true
LOG_TRANSPORT=console
LOG_LEVEL=trace
ENABLE_NEXT_DOCKER_RELEASE=true
3 api/.gitignore (vendored)

@@ -93,3 +93,6 @@ dev/local-session

# local OIDC config for testing - contains secrets
dev/configs/oidc.local.json

# local api keys
dev/keys/*
@@ -1,5 +1,63 @@
# Changelog

## [4.21.0](https://github.com/unraid/api/compare/v4.20.4...v4.21.0) (2025-09-10)


### Features

* add zsh shell detection to install script ([#1539](https://github.com/unraid/api/issues/1539)) ([50ea2a3](https://github.com/unraid/api/commit/50ea2a3ffb82b30152fb85e0fb9b0d178d596efe))
* **api:** determine if docker container has update ([#1582](https://github.com/unraid/api/issues/1582)) ([e57d81e](https://github.com/unraid/api/commit/e57d81e0735772758bb85e0b3c89dce15c56635e))


### Bug Fixes

* white on white login text ([ae4d3ec](https://github.com/unraid/api/commit/ae4d3ecbc417454ae3c6e02018f8e4c49bbfc902))

## [4.20.4](https://github.com/unraid/api/compare/v4.20.3...v4.20.4) (2025-09-09)


### Bug Fixes

* staging PR plugin fixes + UI issues on 7.2 beta ([b79b44e](https://github.com/unraid/api/commit/b79b44e95c65a124313814ab55b0d0a745a799c7))

## [4.20.3](https://github.com/unraid/api/compare/v4.20.2...v4.20.3) (2025-09-09)


### Bug Fixes

* header background color issues fixed on 7.2 - thanks Nick! ([73c1100](https://github.com/unraid/api/commit/73c1100d0ba396fe4342f8ce7561017ab821e68b))

## [4.20.2](https://github.com/unraid/api/compare/v4.20.1...v4.20.2) (2025-09-09)


### Bug Fixes

* trigger deployment ([a27453f](https://github.com/unraid/api/commit/a27453fda81e4eeb07f257e60516bebbbc27cf7a))

## [4.20.1](https://github.com/unraid/api/compare/v4.20.0...v4.20.1) (2025-09-09)


### Bug Fixes

* adjust header styles to fix flashing and width issues - thanks ZarZ ([4759b3d](https://github.com/unraid/api/commit/4759b3d0b3fb6bc71636f75f807cd6f4f62305d1))

## [4.20.0](https://github.com/unraid/api/compare/v4.19.1...v4.20.0) (2025-09-08)


### Features

* **disks:** add isSpinning field to Disk type ([#1527](https://github.com/unraid/api/issues/1527)) ([193be3d](https://github.com/unraid/api/commit/193be3df3672514be9904e3d4fbdff776470afc0))


### Bug Fixes

* better component loading to prevent per-page strange behavior ([095c222](https://github.com/unraid/api/commit/095c2221c94f144f8ad410a69362b15803765531))
* **deps:** pin dependencies ([#1669](https://github.com/unraid/api/issues/1669)) ([413db4b](https://github.com/unraid/api/commit/413db4bd30a06aa69d3ca86e793782854f822589))
* **plugin:** add fallback for unraid-api stop in deprecation cleanup ([#1668](https://github.com/unraid/api/issues/1668)) ([797bf50](https://github.com/unraid/api/commit/797bf50ec702ebc8244ff71a8ef1a80ea5cd2169))
* prepend 'v' to API version in workflow dispatch inputs ([f0cffbd](https://github.com/unraid/api/commit/f0cffbdc7ac36e7037ab60fe9dddbb2cab4a5e10))
* progress frame background color fix ([#1672](https://github.com/unraid/api/issues/1672)) ([785f1f5](https://github.com/unraid/api/commit/785f1f5eb1a1cc8b41f6eb502e4092d149cfbd80))
* properly override header values ([#1673](https://github.com/unraid/api/issues/1673)) ([aecf70f](https://github.com/unraid/api/commit/aecf70ffad60c83074347d3d6ec23f73acbd1aee))

## [4.19.1](https://github.com/unraid/api/compare/v4.19.0...v4.19.1) (2025-09-05)
247 api/docs/developer/feature-flags.md (new file)

@@ -0,0 +1,247 @@
# Feature Flags

Feature flags allow you to conditionally enable or disable functionality in the Unraid API. This is useful for gradually rolling out new features, A/B testing, or keeping experimental code behind flags during development.

## Setting Up Feature Flags

### 1. Define the Feature Flag

Feature flags are defined as environment variables and collected in `src/consts.ts`:

```typescript
// src/environment.ts
export const ENABLE_MY_NEW_FEATURE = process.env.ENABLE_MY_NEW_FEATURE === 'true';

// src/consts.ts
export const FeatureFlags = Object.freeze({
    ENABLE_NEXT_DOCKER_RELEASE,
    ENABLE_MY_NEW_FEATURE, // Add your new flag here
});
```

### 2. Set the Environment Variable

Set the environment variable when running the API:

```bash
ENABLE_MY_NEW_FEATURE=true unraid-api start
```

Or add it to your `.env` file:

```env
ENABLE_MY_NEW_FEATURE=true
```

## Using Feature Flags in GraphQL

### Method 1: @UseFeatureFlag Decorator (Schema-Level)

The `@UseFeatureFlag` decorator conditionally includes or excludes GraphQL fields, queries, and mutations from the schema based on feature flags. When a feature flag is disabled, the field won't appear in the GraphQL schema at all.

```typescript
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
import { Query, Mutation, ResolveField } from '@nestjs/graphql';

@Resolver()
export class MyResolver {

    // Conditionally include a query
    @UseFeatureFlag('ENABLE_MY_NEW_FEATURE')
    @Query(() => String)
    async experimentalQuery() {
        return 'This query only exists when ENABLE_MY_NEW_FEATURE is true';
    }

    // Conditionally include a mutation
    @UseFeatureFlag('ENABLE_MY_NEW_FEATURE')
    @Mutation(() => Boolean)
    async experimentalMutation() {
        return true;
    }

    // Conditionally include a field resolver
    @UseFeatureFlag('ENABLE_MY_NEW_FEATURE')
    @ResolveField(() => String)
    async experimentalField() {
        return 'This field only exists when the flag is enabled';
    }
}
```

**Benefits:**
- Clean schema - disabled features don't appear in GraphQL introspection
- No runtime overhead for disabled features
- Clear feature boundaries

**Use when:**
- You want to completely hide features from the GraphQL schema
- The feature is experimental or in beta
- You're doing a gradual rollout

### Method 2: checkFeatureFlag Function (Runtime)

The `checkFeatureFlag` function provides runtime feature flag checking within resolver methods. It throws a `ForbiddenException` if the feature is disabled.

```typescript
import { checkFeatureFlag } from '@app/unraid-api/utils/feature-flag.helper.js';
import { FeatureFlags } from '@app/consts.js';
import { Query, ResolveField } from '@nestjs/graphql';

@Resolver()
export class MyResolver {

    @Query(() => String)
    async myQuery(
        @Args('useNewAlgorithm', { nullable: true }) useNewAlgorithm?: boolean
    ) {
        // Conditionally use new logic based on feature flag
        if (useNewAlgorithm) {
            checkFeatureFlag(FeatureFlags, 'ENABLE_MY_NEW_FEATURE');
            return this.newAlgorithm();
        }

        return this.oldAlgorithm();
    }

    @ResolveField(() => String)
    async dataField() {
        // Check flag at the start of the method
        checkFeatureFlag(FeatureFlags, 'ENABLE_MY_NEW_FEATURE');

        // Feature-specific logic here
        return this.computeExperimentalData();
    }
}
```

**Benefits:**
- More granular control within methods
- Can conditionally execute parts of a method
- Useful for A/B testing scenarios
- Good for gradual migration strategies

**Use when:**
- You need conditional logic within a method
- The field should exist but behavior changes based on the flag
- You're migrating from old to new implementation gradually

## Feature Flag Patterns

### Pattern 1: Complete Feature Toggle

Hide an entire feature behind a flag:

```typescript
@UseFeatureFlag('ENABLE_DOCKER_TEMPLATES')
@Resolver(() => DockerTemplate)
export class DockerTemplateResolver {
    // All resolvers in this class are toggled by the flag
}
```

### Pattern 2: Gradual Migration

Migrate from old to new implementation:

```typescript
@Query(() => [Container])
async getContainers(@Args('version') version?: string) {
    if (version === 'v2') {
        checkFeatureFlag(FeatureFlags, 'ENABLE_CONTAINERS_V2');
        return this.getContainersV2();
    }

    return this.getContainersV1();
}
```

### Pattern 3: Beta Features

Mark features as beta:

```typescript
@UseFeatureFlag('ENABLE_BETA_FEATURES')
@ResolveField(() => BetaMetrics, {
    description: 'BETA: Advanced metrics (requires ENABLE_BETA_FEATURES flag)'
})
async betaMetrics() {
    return this.computeBetaMetrics();
}
```

### Pattern 4: Performance Optimizations

Toggle expensive operations:

```typescript
@ResolveField(() => Statistics)
async statistics() {
    const basicStats = await this.getBasicStats();

    try {
        checkFeatureFlag(FeatureFlags, 'ENABLE_ADVANCED_ANALYTICS');
        const advancedStats = await this.getAdvancedStats();
        return { ...basicStats, ...advancedStats };
    } catch {
        // Feature disabled, return only basic stats
        return basicStats;
    }
}
```

## Testing with Feature Flags

When writing tests for feature-flagged code, create a mock to control feature flag values:

```typescript
import { vi } from 'vitest';

// Mock the entire consts module
vi.mock('@app/consts.js', async () => {
    const actual = await vi.importActual('@app/consts.js');
    return {
        ...actual,
        FeatureFlags: {
            ENABLE_MY_NEW_FEATURE: true, // Set your test value
            ENABLE_NEXT_DOCKER_RELEASE: false,
        }
    };
});

describe('MyResolver', () => {
    it('should execute new logic when feature is enabled', async () => {
        // Test new behavior with mocked flag
    });
});
```

## Best Practices

1. **Naming Convention**: Use `ENABLE_` prefix for boolean feature flags
2. **Environment Variables**: Always use uppercase with underscores
3. **Documentation**: Document what each feature flag controls
4. **Cleanup**: Remove feature flags once features are stable and fully rolled out
5. **Default State**: New features should default to `false` (disabled)
6. **Granularity**: Keep feature flags focused on a single feature or capability
7. **Testing**: Always test both enabled and disabled states

## Common Use Cases

- **Experimental Features**: Hide unstable features in production
- **Gradual Rollouts**: Enable features for specific environments first
- **A/B Testing**: Toggle between different implementations
- **Performance**: Disable expensive operations when not needed
- **Breaking Changes**: Provide migration path with both old and new behavior
- **Debug Features**: Enable additional logging or debugging tools

## Checking Active Feature Flags

To see which feature flags are currently active:

```typescript
// Log all feature flags on startup
console.log('Active Feature Flags:', FeatureFlags);
```

Or check via GraphQL introspection to see which fields are available based on current flags.
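For example, a standard GraphQL introspection query lists the root query fields currently exposed by the schema, which reflects the active flags. A small sketch, written here as a TypeScript constant so it can be pasted into any GraphQL client (the constant name is illustrative):

```typescript
// Standard GraphQL introspection: list the root query fields currently in the
// schema. Fields hidden by a disabled feature flag will not appear.
export const LIST_QUERY_FIELDS = /* GraphQL */ `
    query ListQueryFields {
        __schema {
            queryType {
                fields {
                    name
                }
            }
        }
    }
`;
```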
@@ -139,6 +139,9 @@ type ArrayDisk implements Node {
  """ata | nvme | usb | (others)"""
  transport: String
  color: ArrayDiskFsColor

  """Whether the disk is currently spinning"""
  isSpinning: Boolean
}

interface Node {
@@ -346,6 +349,9 @@ type Disk implements Node {

  """The partitions on the disk"""
  partitions: [DiskPartition!]!

  """Whether the disk is spinning or not"""
  isSpinning: Boolean!
}

"""The type of interface the disk uses to connect to the system"""
@@ -1044,6 +1050,19 @@ enum ThemeName {
  white
}

type ExplicitStatusItem {
  name: String!
  updateStatus: UpdateStatus!
}

"""Update status of a container."""
enum UpdateStatus {
  UP_TO_DATE
  UPDATE_AVAILABLE
  REBUILD_READY
  UNKNOWN
}

type ContainerPort {
  ip: String
  privatePort: Port
@@ -1083,6 +1102,8 @@ type DockerContainer implements Node {
  networkSettings: JSON
  mounts: [JSON!]
  autoStart: Boolean!
  isUpdateAvailable: Boolean
  isRebuildReady: Boolean
}

enum ContainerState {
@@ -1113,6 +1134,7 @@ type Docker implements Node {
  containers(skipCache: Boolean! = false): [DockerContainer!]!
  networks(skipCache: Boolean! = false): [DockerNetwork!]!
  organizer: ResolvedOrganizerV1!
  containerUpdateStatuses: [ExplicitStatusItem!]!
}

type ResolvedOrganizerView {
@@ -2413,6 +2435,7 @@ type Mutation {
  setDockerFolderChildren(folderId: String, childrenIds: [String!]!): ResolvedOrganizerV1!
  deleteDockerEntries(entryIds: [String!]!): ResolvedOrganizerV1!
  moveDockerEntriesToFolder(sourceEntryIds: [String!]!, destinationFolderId: String!): ResolvedOrganizerV1!
  refreshDockerDigests: Boolean!

  """Initiates a flash drive backup using a configured remote."""
  initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus!
@@ -1,6 +1,6 @@
{
  "name": "@unraid/api",
  "version": "4.19.1",
  "version": "4.21.0",
  "main": "src/cli/index.ts",
  "type": "module",
  "corepack": {
@@ -94,7 +94,7 @@
  "command-exists": "1.2.9",
  "convert": "5.12.0",
  "cookie": "1.0.2",
  "cron": "4.3.3",
  "cron": "4.3.0",
  "cross-fetch": "4.1.0",
  "diff": "8.0.2",
  "dockerode": "4.0.7",
@@ -12,7 +12,22 @@ import {
    UpdateRCloneRemoteDto,
} from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';

vi.mock('got');
vi.mock('got', () => {
    const mockPost = vi.fn();
    const gotMock = {
        post: mockPost,
    };
    return {
        default: gotMock,
        HTTPError: class HTTPError extends Error {
            response?: any;
            constructor(response?: any) {
                super('HTTP Error');
                this.response = response;
            }
        },
    };
});
vi.mock('execa');
vi.mock('p-retry');
vi.mock('node:fs', () => ({
@@ -60,7 +75,7 @@ vi.mock('@nestjs/common', async (importOriginal) => {

describe('RCloneApiService', () => {
    let service: RCloneApiService;
    let mockGot: any;
    let mockGotPost: any;
    let mockExeca: any;
    let mockPRetry: any;
    let mockExistsSync: any;
@@ -68,19 +83,19 @@ describe('RCloneApiService', () => {
    beforeEach(async () => {
        vi.clearAllMocks();

        const { default: got } = await import('got');
        const got = await import('got');
        const { execa } = await import('execa');
        const pRetry = await import('p-retry');
        const { existsSync } = await import('node:fs');
        const { fileExists } = await import('@app/core/utils/files/file-exists.js');

        mockGot = vi.mocked(got);
        mockGotPost = vi.mocked(got.default.post);
        mockExeca = vi.mocked(execa);
        mockPRetry = vi.mocked(pRetry.default);
        mockExistsSync = vi.mocked(existsSync);

        // Mock successful RClone API response for socket check
        mockGot.post = vi.fn().mockResolvedValue({ body: { pid: 12345 } });
        mockGotPost.mockResolvedValue({ body: { pid: 12345 } });

        // Mock RClone binary exists check
        vi.mocked(fileExists).mockResolvedValue(true);
@@ -97,10 +112,10 @@ describe('RCloneApiService', () => {
        mockPRetry.mockResolvedValue(undefined);

        service = new RCloneApiService();
        await service.onModuleInit();
        await service.onApplicationBootstrap();

        // Reset the mock after initialization to prepare for test-specific responses
        mockGot.post.mockClear();
        mockGotPost.mockClear();
    });

    describe('getProviders', () => {
@@ -109,15 +124,15 @@ describe('RCloneApiService', () => {
                { name: 'aws', prefix: 's3', description: 'Amazon S3' },
                { name: 'google', prefix: 'drive', description: 'Google Drive' },
            ];
            mockGot.post.mockResolvedValue({
            mockGotPost.mockResolvedValue({
                body: { providers: mockProviders },
            });

            const result = await service.getProviders();

            expect(result).toEqual(mockProviders);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/providers',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/providers$/),
                expect.objectContaining({
                    json: {},
                    responseType: 'json',
@@ -130,7 +145,7 @@ describe('RCloneApiService', () => {
        });

        it('should return empty array when no providers', async () => {
            mockGot.post.mockResolvedValue({ body: {} });
            mockGotPost.mockResolvedValue({ body: {} });

            const result = await service.getProviders();

@@ -141,15 +156,15 @@ describe('RCloneApiService', () => {
    describe('listRemotes', () => {
        it('should return list of remotes', async () => {
            const mockRemotes = ['backup-s3', 'drive-storage'];
            mockGot.post.mockResolvedValue({
            mockGotPost.mockResolvedValue({
                body: { remotes: mockRemotes },
            });

            const result = await service.listRemotes();

            expect(result).toEqual(mockRemotes);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/listremotes',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/listremotes$/),
                expect.objectContaining({
                    json: {},
                    responseType: 'json',
@@ -162,7 +177,7 @@ describe('RCloneApiService', () => {
        });

        it('should return empty array when no remotes', async () => {
            mockGot.post.mockResolvedValue({ body: {} });
            mockGotPost.mockResolvedValue({ body: {} });

            const result = await service.listRemotes();

@@ -174,13 +189,13 @@ describe('RCloneApiService', () => {
        it('should return remote details', async () => {
            const input: GetRCloneRemoteDetailsDto = { name: 'test-remote' };
            const mockConfig = { type: 's3', provider: 'AWS' };
            mockGot.post.mockResolvedValue({ body: mockConfig });
            mockGotPost.mockResolvedValue({ body: mockConfig });

            const result = await service.getRemoteDetails(input);

            expect(result).toEqual(mockConfig);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/get',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/get$/),
                expect.objectContaining({
                    json: { name: 'test-remote' },
                    responseType: 'json',
@@ -197,7 +212,7 @@ describe('RCloneApiService', () => {
        it('should return remote configuration', async () => {
            const input: GetRCloneRemoteConfigDto = { name: 'test-remote' };
            const mockConfig = { type: 's3', access_key_id: 'AKIA...' };
            mockGot.post.mockResolvedValue({ body: mockConfig });
            mockGotPost.mockResolvedValue({ body: mockConfig });

            const result = await service.getRemoteConfig(input);

@@ -213,13 +228,13 @@ describe('RCloneApiService', () => {
                parameters: { access_key_id: 'AKIA...', secret_access_key: 'secret' },
            };
            const mockResponse = { success: true };
            mockGot.post.mockResolvedValue({ body: mockResponse });
            mockGotPost.mockResolvedValue({ body: mockResponse });

            const result = await service.createRemote(input);

            expect(result).toEqual(mockResponse);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/create',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/create$/),
                expect.objectContaining({
                    json: {
                        name: 'new-remote',
@@ -243,13 +258,13 @@ describe('RCloneApiService', () => {
                parameters: { access_key_id: 'NEW_AKIA...' },
            };
            const mockResponse = { success: true };
            mockGot.post.mockResolvedValue({ body: mockResponse });
            mockGotPost.mockResolvedValue({ body: mockResponse });

            const result = await service.updateRemote(input);

            expect(result).toEqual(mockResponse);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/update',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/update$/),
                expect.objectContaining({
                    json: {
                        name: 'existing-remote',
@@ -269,13 +284,13 @@ describe('RCloneApiService', () => {
        it('should delete a remote', async () => {
            const input: DeleteRCloneRemoteDto = { name: 'remote-to-delete' };
            const mockResponse = { success: true };
            mockGot.post.mockResolvedValue({ body: mockResponse });
            mockGotPost.mockResolvedValue({ body: mockResponse });

            const result = await service.deleteRemote(input);

            expect(result).toEqual(mockResponse);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/config/delete',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/config\/delete$/),
                expect.objectContaining({
                    json: { name: 'remote-to-delete' },
                    responseType: 'json',
@@ -296,13 +311,13 @@ describe('RCloneApiService', () => {
                options: { delete_on: 'dst' },
            };
            const mockResponse = { jobid: 'job-123' };
            mockGot.post.mockResolvedValue({ body: mockResponse });
            mockGotPost.mockResolvedValue({ body: mockResponse });

            const result = await service.startBackup(input);

            expect(result).toEqual(mockResponse);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/sync/copy',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/sync\/copy$/),
                expect.objectContaining({
                    json: {
                        srcFs: '/source/path',
@@ -323,13 +338,13 @@ describe('RCloneApiService', () => {
        it('should return job status', async () => {
            const input: GetRCloneJobStatusDto = { jobId: 'job-123' };
            const mockStatus = { status: 'running', progress: 0.5 };
            mockGot.post.mockResolvedValue({ body: mockStatus });
            mockGotPost.mockResolvedValue({ body: mockStatus });

            const result = await service.getJobStatus(input);

            expect(result).toEqual(mockStatus);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/job/status',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/job\/status$/),
                expect.objectContaining({
                    json: { jobid: 'job-123' },
                    responseType: 'json',
@@ -348,13 +363,13 @@ describe('RCloneApiService', () => {
                { id: 'job-1', status: 'running' },
                { id: 'job-2', status: 'finished' },
            ];
            mockGot.post.mockResolvedValue({ body: mockJobs });
            mockGotPost.mockResolvedValue({ body: mockJobs });

            const result = await service.listRunningJobs();

            expect(result).toEqual(mockJobs);
            expect(mockGot.post).toHaveBeenCalledWith(
                'http://unix:/tmp/rclone.sock:/job/list',
            expect(mockGotPost).toHaveBeenCalledWith(
                expect.stringMatching(/\/job\/list$/),
                expect.objectContaining({
                    json: {},
                    responseType: 'json',
@@ -378,7 +393,7 @@ describe('RCloneApiService', () => {
                },
            };
            Object.setPrototypeOf(httpError, HTTPError.prototype);
            mockGot.post.mockRejectedValue(httpError);
            mockGotPost.mockRejectedValue(httpError);

            await expect(service.getProviders()).rejects.toThrow(
                'Rclone API Error (config/providers, HTTP 500): Rclone Error: Internal server error'
@@ -395,7 +410,7 @@ describe('RCloneApiService', () => {
                },
            };
            Object.setPrototypeOf(httpError, HTTPError.prototype);
            mockGot.post.mockRejectedValue(httpError);
            mockGotPost.mockRejectedValue(httpError);

            await expect(service.getProviders()).rejects.toThrow(
                'Rclone API Error (config/providers, HTTP 404): Failed to process error response body. Raw body:'
@@ -412,7 +427,7 @@ describe('RCloneApiService', () => {
                },
            };
            Object.setPrototypeOf(httpError, HTTPError.prototype);
            mockGot.post.mockRejectedValue(httpError);
            mockGotPost.mockRejectedValue(httpError);

            await expect(service.getProviders()).rejects.toThrow(
                'Rclone API Error (config/providers, HTTP 400): Failed to process error response body. Raw body: invalid json'
@@ -421,17 +436,108 @@ describe('RCloneApiService', () => {

        it('should handle non-HTTP errors', async () => {
            const networkError = new Error('Network connection failed');
            mockGot.post.mockRejectedValue(networkError);
            mockGotPost.mockRejectedValue(networkError);

            await expect(service.getProviders()).rejects.toThrow('Network connection failed');
        });

        it('should handle unknown errors', async () => {
            mockGot.post.mockRejectedValue('unknown error');
            mockGotPost.mockRejectedValue('unknown error');

            await expect(service.getProviders()).rejects.toThrow(
                'Unknown error calling RClone API (config/providers) with params {}: unknown error'
            );
        });
    });

    describe('checkRcloneBinaryExists', () => {
        beforeEach(() => {
            // Create a new service instance without initializing for these tests
            service = new RCloneApiService();
        });

        it('should return true when rclone version is 1.70.0', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone v1.70.0\n- os/version: darwin 14.0 (64 bit)\n- os/kernel: 23.0.0 (arm64)',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(true);
        });

        it('should return true when rclone version is newer than 1.70.0', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone v1.75.2\n- os/version: darwin 14.0 (64 bit)\n- os/kernel: 23.0.0 (arm64)',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(true);
        });

        it('should return false when rclone version is older than 1.70.0', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone v1.69.0\n- os/version: darwin 14.0 (64 bit)\n- os/kernel: 23.0.0 (arm64)',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(false);
        });

        it('should return false when rclone version is much older', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone v1.50.0\n- os/version: darwin 14.0 (64 bit)\n- os/kernel: 23.0.0 (arm64)',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(false);
        });

        it('should return false when version cannot be parsed', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone unknown version format',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(false);
        });

        it('should return false when rclone binary is not found', async () => {
            const error = new Error('Command not found') as any;
            error.code = 'ENOENT';
            mockExeca.mockRejectedValueOnce(error);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(false);
        });

        it('should return false and log error for other exceptions', async () => {
            mockExeca.mockRejectedValueOnce(new Error('Some other error'));

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(false);
        });

        it('should handle beta/rc versions correctly', async () => {
            mockExeca.mockResolvedValueOnce({
                stdout: 'rclone v1.70.0-beta.1\n- os/version: darwin 14.0 (64 bit)\n- os/kernel: 23.0.0 (arm64)',
                stderr: '',
            } as any);

            const result = await (service as any).checkRcloneBinaryExists();

            expect(result).toBe(true);
        });
    });
});
@@ -2,7 +2,7 @@ import { join } from 'path';

import type { JSONWebKeySet } from 'jose';

import { PORT } from '@app/environment.js';
import { ENABLE_NEXT_DOCKER_RELEASE, PORT } from '@app/environment.js';

export const getInternalApiAddress = (isHttp = true, nginxPort = 80) => {
    const envPort = PORT;
@@ -79,3 +79,14 @@ export const KEYSERVER_VALIDATION_ENDPOINT = 'https://keys.lime-technology.com/v

/** Set the max retries for the GraphQL Client */
export const MAX_RETRIES_FOR_LINEAR_BACKOFF = 100;

/**
 * Feature flags are used to conditionally enable or disable functionality in the Unraid API.
 *
 * Keys are human readable feature flag names -- will be used to construct error messages.
 *
 * Values are boolean/truthy values.
 */
export const FeatureFlags = Object.freeze({
    ENABLE_NEXT_DOCKER_RELEASE,
});

@@ -110,3 +110,6 @@ export const PATHS_CONFIG_MODULES =

export const PATHS_LOCAL_SESSION_FILE =
    process.env.PATHS_LOCAL_SESSION_FILE ?? '/var/run/unraid-api/local-session';

/** feature flag for the upcoming docker release */
export const ENABLE_NEXT_DOCKER_RELEASE = process.env.ENABLE_NEXT_DOCKER_RELEASE === 'true';
@@ -14,6 +14,7 @@ import { AuthModule } from '@app/unraid-api/auth/auth.module.js';
import { AuthenticationGuard } from '@app/unraid-api/auth/authentication.guard.js';
import { LegacyConfigModule } from '@app/unraid-api/config/legacy-config.module.js';
import { CronModule } from '@app/unraid-api/cron/cron.module.js';
import { JobModule } from '@app/unraid-api/cron/job.module.js';
import { GraphModule } from '@app/unraid-api/graph/graph.module.js';
import { GlobalDepsModule } from '@app/unraid-api/plugin/global-deps.module.js';
import { RestModule } from '@app/unraid-api/rest/rest.module.js';
@@ -24,7 +25,7 @@ import { UnraidFileModifierModule } from '@app/unraid-api/unraid-file-modifier/u
        GlobalDepsModule,
        LegacyConfigModule,
        PubSubModule,
        ScheduleModule.forRoot(),
        JobModule,
        LoggerModule.forRoot({
            pinoHttp: {
                logger: apiLogger,
@@ -241,6 +241,8 @@ export type ArrayDisk = Node & {
    id: Scalars['PrefixedID']['output'];
    /** Array slot number. Parity1 is always 0 and Parity2 is always 29. Array slots will be 1 - 28. Cache slots are 30 - 53. Flash is 54. */
    idx: Scalars['Int']['output'];
    /** Whether the disk is currently spinning */
    isSpinning?: Maybe<Scalars['Boolean']['output']>;
    name?: Maybe<Scalars['String']['output']>;
    /** Number of unrecoverable errors reported by the device I/O drivers. Missing data due to unrecoverable array read errors is filled in on-the-fly using parity reconstruct (and we attempt to write this data back to the sector(s) which failed). Any unrecoverable write error results in disabling the disk. */
    numErrors?: Maybe<Scalars['BigInt']['output']>;
@@ -607,6 +609,8 @@ export type Disk = Node & {
    id: Scalars['PrefixedID']['output'];
    /** The interface type of the disk */
    interfaceType: DiskInterfaceType;
    /** Whether the disk is spinning or not */
    isSpinning: Scalars['Boolean']['output'];
    /** The model name of the disk */
    name: Scalars['String']['output'];
    /** The partitions on the disk */
@@ -674,6 +678,7 @@ export enum DiskSmartStatus {

export type Docker = Node & {
    __typename?: 'Docker';
    containerUpdateStatuses: Array<ExplicitStatusItem>;
    containers: Array<DockerContainer>;
    id: Scalars['PrefixedID']['output'];
    networks: Array<DockerNetwork>;
@@ -699,6 +704,8 @@ export type DockerContainer = Node & {
    id: Scalars['PrefixedID']['output'];
    image: Scalars['String']['output'];
    imageId: Scalars['String']['output'];
    isRebuildReady?: Maybe<Scalars['Boolean']['output']>;
    isUpdateAvailable?: Maybe<Scalars['Boolean']['output']>;
    labels?: Maybe<Scalars['JSON']['output']>;
    mounts?: Maybe<Array<Scalars['JSON']['output']>>;
    names: Array<Scalars['String']['output']>;
@@ -770,6 +777,12 @@ export type EnableDynamicRemoteAccessInput = {
    url: AccessUrlInput;
};

export type ExplicitStatusItem = {
    __typename?: 'ExplicitStatusItem';
    name: Scalars['String']['output'];
    updateStatus: UpdateStatus;
};

export type Flash = Node & {
    __typename?: 'Flash';
    guid: Scalars['String']['output'];
@@ -1225,6 +1238,7 @@ export type Mutation = {
    rclone: RCloneMutations;
    /** Reads each notification to recompute & update the overview. */
    recalculateOverview: NotificationOverview;
    refreshDockerDigests: Scalars['Boolean']['output'];
    /** Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. */
    removePlugin: Scalars['Boolean']['output'];
    setDockerFolderChildren: ResolvedOrganizerV1;
@@ -2260,6 +2274,14 @@ export type UpdateSettingsResponse = {
    warnings?: Maybe<Array<Scalars['String']['output']>>;
};

/** Update status of a container. */
export enum UpdateStatus {
    REBUILD_READY = 'REBUILD_READY',
    UNKNOWN = 'UNKNOWN',
    UPDATE_AVAILABLE = 'UPDATE_AVAILABLE',
    UP_TO_DATE = 'UP_TO_DATE'
}

export type Uptime = {
    __typename?: 'Uptime';
    timestamp?: Maybe<Scalars['String']['output']>;
@@ -1,11 +1,11 @@
import { Module } from '@nestjs/common';
import { ScheduleModule } from '@nestjs/schedule';

import { JobModule } from '@app/unraid-api/cron/job.module.js';
import { LogRotateService } from '@app/unraid-api/cron/log-rotate.service.js';
import { WriteFlashFileService } from '@app/unraid-api/cron/write-flash-file.service.js';

@Module({
    imports: [],
    imports: [JobModule],
    providers: [WriteFlashFileService, LogRotateService],
})
export class CronModule {}
13 api/src/unraid-api/cron/job.module.ts (new file)

@@ -0,0 +1,13 @@
import { Module } from '@nestjs/common';
import { ScheduleModule } from '@nestjs/schedule';

/**
 * Sets up common dependencies for initializing jobs (e.g. scheduler registry, cron jobs).
 *
 * Simplifies testing setup & application dependency tree by ensuring `forRoot` is called only once.
 */
@Module({
    imports: [ScheduleModule.forRoot()],
    exports: [ScheduleModule],
})
export class JobModule {}
172
api/src/unraid-api/decorators/omit-if.decorator.spec.ts
Normal file
172
api/src/unraid-api/decorators/omit-if.decorator.spec.ts
Normal file
@@ -0,0 +1,172 @@
|
||||
import { Reflector } from '@nestjs/core';
|
||||
import { Field, Mutation, ObjectType, Query, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { OMIT_IF_METADATA_KEY, OmitIf } from '@app/unraid-api/decorators/omit-if.decorator.js';
|
||||
|
||||
describe('OmitIf Decorator', () => {
|
||||
let reflector: Reflector;
|
||||
|
||||
beforeEach(() => {
|
||||
        reflector = new Reflector();
    });

    describe('OmitIf', () => {
        it('should set metadata when condition is true', () => {
            class TestResolver {
                @OmitIf(true)
                testMethod() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testMethod);
            expect(metadata).toBe(true);
        });

        it('should not set metadata when condition is false', () => {
            class TestResolver {
                @OmitIf(false)
                testMethod() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testMethod);
            expect(metadata).toBeUndefined();
        });

        it('should evaluate function conditions', () => {
            const mockCondition = vi.fn(() => true);

            class TestResolver {
                @OmitIf(mockCondition)
                testMethod() {
                    return 'test';
                }
            }

            expect(mockCondition).toHaveBeenCalledOnce();
            const instance = new TestResolver();
            const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testMethod);
            expect(metadata).toBe(true);
        });

        it('should evaluate function conditions that return false', () => {
            const mockCondition = vi.fn(() => false);

            class TestResolver {
                @OmitIf(mockCondition)
                testMethod() {
                    return 'test';
                }
            }

            expect(mockCondition).toHaveBeenCalledOnce();
            const instance = new TestResolver();
            const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testMethod);
            expect(metadata).toBeUndefined();
        });

        it('should work with environment variables', () => {
            const originalEnv = process.env.NODE_ENV;
            process.env.NODE_ENV = 'production';

            class TestResolver {
                @OmitIf(process.env.NODE_ENV === 'production')
                testMethod() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testMethod);
            expect(metadata).toBe(true);

            process.env.NODE_ENV = originalEnv;
        });
    });

    describe('Integration with NestJS GraphQL decorators', () => {
        it('should work with @Query decorator', () => {
            @Resolver()
            class TestResolver {
                @OmitIf(true)
                @Query(() => String)
                omittedQuery() {
                    return 'test';
                }

                @OmitIf(false)
                @Query(() => String)
                includedQuery() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const omittedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.omittedQuery);
            const includedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.includedQuery);

            expect(omittedMetadata).toBe(true);
            expect(includedMetadata).toBeUndefined();
        });

        it('should work with @Mutation decorator', () => {
            @Resolver()
            class TestResolver {
                @OmitIf(true)
                @Mutation(() => String)
                omittedMutation() {
                    return 'test';
                }

                @OmitIf(false)
                @Mutation(() => String)
                includedMutation() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const omittedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.omittedMutation);
            const includedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.includedMutation);

            expect(omittedMetadata).toBe(true);
            expect(includedMetadata).toBeUndefined();
        });

        it('should work with @ResolveField decorator', () => {
            @ObjectType()
            class TestType {
                @Field()
                id: string = '';
            }

            @Resolver(() => TestType)
            class TestResolver {
                @OmitIf(true)
                @ResolveField(() => String)
                omittedField() {
                    return 'test';
                }

                @OmitIf(false)
                @ResolveField(() => String)
                includedField() {
                    return 'test';
                }
            }

            const instance = new TestResolver();
            const omittedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.omittedField);
            const includedMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.includedField);

            expect(omittedMetadata).toBe(true);
            expect(includedMetadata).toBeUndefined();
        });
    });
});
api/src/unraid-api/decorators/omit-if.decorator.ts (new file)
@@ -0,0 +1,80 @@
import { SetMetadata } from '@nestjs/common';
import { Extensions } from '@nestjs/graphql';

import { MapperKind, mapSchema } from '@graphql-tools/utils';
import { GraphQLFieldConfig, GraphQLSchema } from 'graphql';

export const OMIT_IF_METADATA_KEY = 'omitIf';

/**
 * Decorator that conditionally omits a GraphQL field/query/mutation based on a condition.
 * The field will only be omitted from the schema when the condition evaluates to true.
 *
 * @param condition - If the condition evaluates to true, the field will be omitted from the schema
 * @returns A decorator that wraps the target field/query/mutation
 *
 * @example
 * ```typescript
 * @OmitIf(process.env.NODE_ENV === 'production')
 * @Query(() => String)
 * async debugQuery() {
 *     return 'This query is omitted in production';
 * }
 * ```
 */
export function OmitIf(condition: boolean | (() => boolean)): MethodDecorator & PropertyDecorator {
    const shouldOmit = typeof condition === 'function' ? condition() : condition;

    return (target: object, propertyKey?: string | symbol, descriptor?: PropertyDescriptor) => {
        if (shouldOmit) {
            SetMetadata(OMIT_IF_METADATA_KEY, true)(
                target,
                propertyKey as string,
                descriptor as PropertyDescriptor
            );
            Extensions({ omitIf: true })(
                target,
                propertyKey as string,
                descriptor as PropertyDescriptor
            );
        }

        return descriptor;
    };
}

/**
 * Schema transformer that omits fields/queries/mutations based on the OmitIf decorator.
 * @param schema - The GraphQL schema to transform
 * @returns The transformed GraphQL schema
 */
export function omitIfSchemaTransformer(schema: GraphQLSchema): GraphQLSchema {
    return mapSchema(schema, {
        [MapperKind.OBJECT_FIELD]: (
            fieldConfig: GraphQLFieldConfig<any, any>,
            fieldName: string,
            typeName: string
        ) => {
            const extensions = fieldConfig.extensions || {};

            if (extensions.omitIf === true) {
                return null;
            }

            return fieldConfig;
        },
        [MapperKind.ROOT_FIELD]: (
            fieldConfig: GraphQLFieldConfig<any, any>,
            fieldName: string,
            typeName: string
        ) => {
            const extensions = fieldConfig.extensions || {};

            if (extensions.omitIf === true) {
                return null;
            }

            return fieldConfig;
        },
    });
}
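Worth noting (an observation about the implementation above, not text from the diff): OmitIf resolves its condition once, while the class is being decorated, which is also why the spec asserts toHaveBeenCalledOnce() on function conditions. A minimal sketch of the consequence, using a hypothetical isProd helper:

// Sketch only — `isProd` is a hypothetical helper, not part of this changeset.
const isProd = () => process.env.NODE_ENV === 'production';

class DebugResolver {
    // The function is invoked immediately during decoration, so changing
    // NODE_ENV after startup does not bring the field back into the schema.
    @OmitIf(isProd)
    debugInfo() {
        return 'only exposed when the API was started outside production';
    }
}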
api/src/unraid-api/decorators/use-feature-flag.decorator.spec.ts (new file)
@@ -0,0 +1,317 @@
|
||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||
// @ts-nocheck
|
||||
// fixme: types don't sync with mocks, and there's no override to simplify testing.
|
||||
|
||||
import { Reflector } from '@nestjs/core';
|
||||
import { Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { OMIT_IF_METADATA_KEY } from '@app/unraid-api/decorators/omit-if.decorator.js';
|
||||
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
|
||||
|
||||
// Mock the FeatureFlags
|
||||
vi.mock('@app/consts.js', () => ({
|
||||
FeatureFlags: Object.freeze({
|
||||
ENABLE_NEXT_DOCKER_RELEASE: false,
|
||||
ENABLE_EXPERIMENTAL_FEATURE: true,
|
||||
ENABLE_DEBUG_MODE: false,
|
||||
ENABLE_BETA_FEATURES: true,
|
||||
}),
|
||||
}));
|
||||
|
||||
describe('UseFeatureFlag Decorator', () => {
|
||||
let reflector: Reflector;
|
||||
|
||||
beforeEach(() => {
|
||||
reflector = new Reflector();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('Basic functionality', () => {
|
||||
it('should omit field when feature flag is false', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Query(() => String)
|
||||
testQuery() {
|
||||
return 'test';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testQuery);
|
||||
expect(metadata).toBe(true); // Should be omitted because flag is false
|
||||
});
|
||||
|
||||
it('should include field when feature flag is true', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_EXPERIMENTAL_FEATURE')
|
||||
@Query(() => String)
|
||||
testQuery() {
|
||||
return 'test';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const metadata = reflector.get(OMIT_IF_METADATA_KEY, instance.testQuery);
|
||||
expect(metadata).toBeUndefined(); // Should not be omitted because flag is true
|
||||
});
|
||||
});
|
||||
|
||||
describe('With different decorator types', () => {
|
||||
it('should work with @Query decorator', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_DEBUG_MODE')
|
||||
@Query(() => String)
|
||||
debugQuery() {
|
||||
return 'debug';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_BETA_FEATURES')
|
||||
@Query(() => String)
|
||||
betaQuery() {
|
||||
return 'beta';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const debugMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.debugQuery);
|
||||
const betaMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.betaQuery);
|
||||
|
||||
expect(debugMetadata).toBe(true); // ENABLE_DEBUG_MODE is false
|
||||
expect(betaMetadata).toBeUndefined(); // ENABLE_BETA_FEATURES is true
|
||||
});
|
||||
|
||||
it('should work with @Mutation decorator', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Mutation(() => String)
|
||||
dockerMutation() {
|
||||
return 'docker';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_EXPERIMENTAL_FEATURE')
|
||||
@Mutation(() => String)
|
||||
experimentalMutation() {
|
||||
return 'experimental';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const dockerMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.dockerMutation);
|
||||
const experimentalMetadata = reflector.get(
|
||||
OMIT_IF_METADATA_KEY,
|
||||
instance.experimentalMutation
|
||||
);
|
||||
|
||||
expect(dockerMetadata).toBe(true); // ENABLE_NEXT_DOCKER_RELEASE is false
|
||||
expect(experimentalMetadata).toBeUndefined(); // ENABLE_EXPERIMENTAL_FEATURE is true
|
||||
});
|
||||
|
||||
it('should work with @ResolveField decorator', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_DEBUG_MODE')
|
||||
@ResolveField(() => String)
|
||||
debugField() {
|
||||
return 'debug';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_BETA_FEATURES')
|
||||
@ResolveField(() => String)
|
||||
betaField() {
|
||||
return 'beta';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const debugMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.debugField);
|
||||
const betaMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.betaField);
|
||||
|
||||
expect(debugMetadata).toBe(true); // ENABLE_DEBUG_MODE is false
|
||||
expect(betaMetadata).toBeUndefined(); // ENABLE_BETA_FEATURES is true
|
||||
});
|
||||
});
|
||||
|
||||
describe('Multiple decorators on same class', () => {
|
||||
it('should handle multiple feature flags independently', () => {
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Query(() => String)
|
||||
dockerQuery() {
|
||||
return 'docker';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_EXPERIMENTAL_FEATURE')
|
||||
@Query(() => String)
|
||||
experimentalQuery() {
|
||||
return 'experimental';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_DEBUG_MODE')
|
||||
@Query(() => String)
|
||||
debugQuery() {
|
||||
return 'debug';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_BETA_FEATURES')
|
||||
@Query(() => String)
|
||||
betaQuery() {
|
||||
return 'beta';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.dockerQuery)).toBe(true);
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.experimentalQuery)).toBeUndefined();
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.debugQuery)).toBe(true);
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.betaQuery)).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Type safety', () => {
|
||||
it('should only accept valid feature flag keys', () => {
|
||||
// This test verifies TypeScript compile-time type safety
|
||||
// The following would cause a TypeScript error if uncommented:
|
||||
// @UseFeatureFlag('INVALID_FLAG')
|
||||
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Query(() => String)
|
||||
validQuery() {
|
||||
return 'valid';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
expect(instance.validQuery).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Integration scenarios', () => {
|
||||
it('should work correctly with other decorators', () => {
|
||||
const customDecorator = (
|
||||
target: any,
|
||||
propertyKey: string | symbol,
|
||||
descriptor: PropertyDescriptor
|
||||
) => {
|
||||
Reflect.defineMetadata('custom', true, target, propertyKey);
|
||||
return descriptor;
|
||||
};
|
||||
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@customDecorator
|
||||
@Query(() => String)
|
||||
multiDecoratorQuery() {
|
||||
return 'multi';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new TestResolver();
|
||||
const omitMetadata = reflector.get(OMIT_IF_METADATA_KEY, instance.multiDecoratorQuery);
|
||||
const customMetadata = Reflect.getMetadata('custom', instance, 'multiDecoratorQuery');
|
||||
|
||||
expect(omitMetadata).toBe(true);
|
||||
expect(customMetadata).toBe(true);
|
||||
});
|
||||
|
||||
it('should maintain correct decorator order', () => {
|
||||
const orderTracker: string[] = [];
|
||||
|
||||
const trackingDecorator = (name: string) => {
|
||||
return (target: any, propertyKey: string | symbol, descriptor: PropertyDescriptor) => {
|
||||
orderTracker.push(name);
|
||||
return descriptor;
|
||||
};
|
||||
};
|
||||
|
||||
@Resolver()
|
||||
class TestResolver {
|
||||
@trackingDecorator('first')
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@trackingDecorator('last')
|
||||
@Query(() => String)
|
||||
orderedQuery() {
|
||||
return 'ordered';
|
||||
}
|
||||
}
|
||||
|
||||
// Decorators are applied bottom-up
|
||||
expect(orderTracker).toEqual(['last', 'first']);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Real-world usage patterns', () => {
|
||||
it('should work with Docker resolver pattern', () => {
|
||||
@Resolver()
|
||||
class DockerResolver {
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Mutation(() => String)
|
||||
async createDockerFolder(name: string) {
|
||||
return `Created folder: ${name}`;
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Mutation(() => String)
|
||||
async deleteDockerEntries(entryIds: string[]) {
|
||||
return `Deleted entries: ${entryIds.join(', ')}`;
|
||||
}
|
||||
|
||||
@Query(() => String)
|
||||
async getDockerInfo() {
|
||||
return 'Docker info';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new DockerResolver();
|
||||
|
||||
// Feature flag is false, so these should be omitted
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.createDockerFolder)).toBe(true);
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.deleteDockerEntries)).toBe(true);
|
||||
|
||||
// No feature flag, so this should not be omitted
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.getDockerInfo)).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should handle mixed feature flags in same resolver', () => {
|
||||
@Resolver()
|
||||
class MixedResolver {
|
||||
@UseFeatureFlag('ENABLE_EXPERIMENTAL_FEATURE')
|
||||
@Query(() => String)
|
||||
experimentalQuery() {
|
||||
return 'experimental';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@Query(() => String)
|
||||
dockerQuery() {
|
||||
return 'docker';
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_BETA_FEATURES')
|
||||
@Mutation(() => String)
|
||||
betaMutation() {
|
||||
return 'beta';
|
||||
}
|
||||
}
|
||||
|
||||
const instance = new MixedResolver();
|
||||
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.experimentalQuery)).toBeUndefined();
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.dockerQuery)).toBe(true);
|
||||
expect(reflector.get(OMIT_IF_METADATA_KEY, instance.betaMutation)).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
api/src/unraid-api/decorators/use-feature-flag.decorator.ts (new file)
@@ -0,0 +1,22 @@
import { FeatureFlags } from '@app/consts.js';
import { OmitIf } from '@app/unraid-api/decorators/omit-if.decorator.js';

/**
 * Decorator that conditionally includes a GraphQL field/query/mutation based on a feature flag.
 * The field will only be included in the schema when the feature flag is enabled.
 *
 * @param flagKey - The key of the feature flag in FeatureFlags
 * @returns A decorator that wraps OmitIf
 *
 * @example
 * ```typescript
 * @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
 * @Mutation(() => String)
 * async experimentalMutation() {
 *     return 'This mutation is only available when ENABLE_NEXT_DOCKER_RELEASE is true';
 * }
 * ```
 */
export function UseFeatureFlag(flagKey: keyof typeof FeatureFlags): MethodDecorator & PropertyDecorator {
    return OmitIf(!FeatureFlags[flagKey]);
}
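For context, UseFeatureFlag just negates a FeatureFlags value at decoration time. The flag object itself lives in @app/consts.js and is not part of this diff; a plausible shape, mirroring the keys that the spec file mocks, would be:

// Hypothetical sketch of FeatureFlags in @app/consts.js (only one key shown).
export const FeatureFlags = Object.freeze({
    ENABLE_NEXT_DOCKER_RELEASE: false,
});

// Because the flag is read when the resolver class is decorated, flipping it
// requires restarting the API for the GraphQL schema to change.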
@@ -12,6 +12,7 @@ import { NoUnusedVariablesRule } from 'graphql';
 
 import { ENVIRONMENT } from '@app/environment.js';
 import { ApiConfigModule } from '@app/unraid-api/config/api-config.module.js';
+import { omitIfSchemaTransformer } from '@app/unraid-api/decorators/omit-if.decorator.js';
 
 // Import enum registrations to ensure they're registered with GraphQL
 import '@app/unraid-api/graph/auth/auth-action.enum.js';
@@ -64,7 +65,12 @@ import { PluginModule } from '@app/unraid-api/plugin/plugin.module.js';
                 },
                 // Only add transform when not in test environment to avoid GraphQL version conflicts
                 transformSchema:
-                    process.env.NODE_ENV === 'test' ? undefined : usePermissionsSchemaTransformer,
+                    process.env.NODE_ENV === 'test'
+                        ? undefined
+                        : (schema) => {
+                              const schemaWithPermissions = usePermissionsSchemaTransformer(schema);
+                              return omitIfSchemaTransformer(schemaWithPermissions);
+                          },
                 validationRules: [NoUnusedVariablesRule],
             };
         },
@@ -0,0 +1,47 @@
import { Injectable, Logger, OnApplicationBootstrap } from '@nestjs/common';
import { SchedulerRegistry, Timeout } from '@nestjs/schedule';

import { CronJob } from 'cron';

import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';

@Injectable()
export class ContainerStatusJob implements OnApplicationBootstrap {
    private readonly logger = new Logger(ContainerStatusJob.name);
    constructor(
        private readonly dockerManifestService: DockerManifestService,
        private readonly schedulerRegistry: SchedulerRegistry,
        private readonly dockerConfigService: DockerConfigService
    ) {}

    /**
     * Initialize cron job for refreshing the update status for all containers on a user-configurable schedule.
     */
    onApplicationBootstrap() {
        if (!this.dockerConfigService.enabled()) return;
        const cronExpression = this.dockerConfigService.getConfig().updateCheckCronSchedule;
        const cronJob = CronJob.from({
            cronTime: cronExpression,
            onTick: () => {
                this.dockerManifestService.refreshDigests().catch((error) => {
                    this.logger.warn(error, 'Failed to refresh container update status');
                });
            },
            start: true,
        });
        this.schedulerRegistry.addCronJob(ContainerStatusJob.name, cronJob);
        this.logger.verbose(
            `Initialized cron job for refreshing container update status: ${ContainerStatusJob.name}`
        );
    }

    /**
     * Refresh container digests 5 seconds after application start.
     */
    @Timeout(5_000)
    async refreshContainerDigestsAfterStartup() {
        if (!this.dockerConfigService.enabled()) return;
        await this.dockerManifestService.refreshDigests();
    }
}
@@ -0,0 +1,7 @@
import { Field, ObjectType } from '@nestjs/graphql';

@ObjectType()
export class DockerConfig {
    @Field(() => String)
    updateCheckCronSchedule!: string;
}
@@ -0,0 +1,195 @@
|
||||
import { ConfigService } from '@nestjs/config';
|
||||
import { CronExpression } from '@nestjs/schedule';
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { ValidationError } from 'class-validator';
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
|
||||
|
||||
vi.mock('cron', () => ({
|
||||
validateCronExpression: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('@app/unraid-api/graph/resolvers/validation.utils.js', () => ({
|
||||
validateObject: vi.fn(),
|
||||
}));
|
||||
|
||||
describe('DockerConfigService - validate', () => {
|
||||
let service: DockerConfigService;
|
||||
|
||||
beforeEach(async () => {
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
DockerConfigService,
|
||||
{
|
||||
provide: ConfigService,
|
||||
useValue: {
|
||||
get: vi.fn(),
|
||||
},
|
||||
},
|
||||
],
|
||||
}).compile();
|
||||
|
||||
service = module.get<DockerConfigService>(DockerConfigService);
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('validate', () => {
|
||||
it('should validate and return docker config for valid cron expression', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: '0 6 * * *' };
|
||||
const validatedConfig = { updateCheckCronSchedule: '0 6 * * *' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: true });
|
||||
|
||||
const result = await service.validate(inputConfig);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('0 6 * * *');
|
||||
expect(result).toBe(validatedConfig);
|
||||
});
|
||||
|
||||
it('should validate and return docker config for predefined cron expression', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: CronExpression.EVERY_DAY_AT_6AM };
|
||||
const validatedConfig = { updateCheckCronSchedule: CronExpression.EVERY_DAY_AT_6AM };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: true });
|
||||
|
||||
const result = await service.validate(inputConfig);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith(CronExpression.EVERY_DAY_AT_6AM);
|
||||
expect(result).toBe(validatedConfig);
|
||||
});
|
||||
|
||||
it('should throw AppError for invalid cron expression', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: 'invalid-cron' };
|
||||
const validatedConfig = { updateCheckCronSchedule: 'invalid-cron' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: false });
|
||||
|
||||
await expect(service.validate(inputConfig)).rejects.toThrow(
|
||||
new AppError('Cron expression not supported: invalid-cron')
|
||||
);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('invalid-cron');
|
||||
});
|
||||
|
||||
it('should throw AppError for empty cron expression', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: '' };
|
||||
const validatedConfig = { updateCheckCronSchedule: '' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: false });
|
||||
|
||||
await expect(service.validate(inputConfig)).rejects.toThrow(
|
||||
new AppError('Cron expression not supported: ')
|
||||
);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('');
|
||||
});
|
||||
|
||||
it('should throw AppError for malformed cron expression', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: '* * * *' };
|
||||
const validatedConfig = { updateCheckCronSchedule: '* * * *' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: false });
|
||||
|
||||
await expect(service.validate(inputConfig)).rejects.toThrow(
|
||||
new AppError('Cron expression not supported: * * * *')
|
||||
);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('* * * *');
|
||||
});
|
||||
|
||||
it('should propagate validation errors from validateObject', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: '0 6 * * *' };
|
||||
const validationError = new ValidationError();
|
||||
validationError.property = 'updateCheckCronSchedule';
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
|
||||
vi.mocked(validateObject).mockRejectedValue(validationError);
|
||||
|
||||
await expect(service.validate(inputConfig)).rejects.toThrow();
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
});
|
||||
|
||||
it('should handle complex valid cron expressions', async () => {
|
||||
const inputConfig = { updateCheckCronSchedule: '0 0,12 * * 1-5' };
|
||||
const validatedConfig = { updateCheckCronSchedule: '0 0,12 * * 1-5' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: true });
|
||||
|
||||
const result = await service.validate(inputConfig);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('0 0,12 * * 1-5');
|
||||
expect(result).toBe(validatedConfig);
|
||||
});
|
||||
|
||||
it('should handle input with extra properties', async () => {
|
||||
const inputConfig = {
|
||||
updateCheckCronSchedule: '0 6 * * *',
|
||||
extraProperty: 'should be ignored',
|
||||
};
|
||||
const validatedConfig = { updateCheckCronSchedule: '0 6 * * *' };
|
||||
|
||||
const { validateObject } = await import(
|
||||
'@app/unraid-api/graph/resolvers/validation.utils.js'
|
||||
);
|
||||
const { validateCronExpression } = await import('cron');
|
||||
|
||||
vi.mocked(validateObject).mockResolvedValue(validatedConfig);
|
||||
vi.mocked(validateCronExpression).mockReturnValue({ valid: true });
|
||||
|
||||
const result = await service.validate(inputConfig);
|
||||
|
||||
expect(validateObject).toHaveBeenCalledWith(expect.any(Function), inputConfig);
|
||||
expect(validateCronExpression).toHaveBeenCalledWith('0 6 * * *');
|
||||
expect(result).toBe(validatedConfig);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,59 +1,45 @@
 import { Injectable } from '@nestjs/common';
 import { ConfigService } from '@nestjs/config';
+import { CronExpression } from '@nestjs/schedule';
 
 import { ConfigFilePersister } from '@unraid/shared/services/config-file.js';
+import { validateCronExpression } from 'cron';
 
 import { FeatureFlags } from '@app/consts.js';
 import { AppError } from '@app/core/errors/app-error.js';
+import { DockerConfig } from '@app/unraid-api/graph/resolvers/docker/docker-config.model.js';
 import { validateObject } from '@app/unraid-api/graph/resolvers/validation.utils.js';
-import {
-    DEFAULT_ORGANIZER_ROOT_ID,
-    DEFAULT_ORGANIZER_VIEW_ID,
-} from '@app/unraid-api/organizer/organizer.js';
-import { OrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
-import { validateOrganizerIntegrity } from '@app/unraid-api/organizer/organizer.validation.js';
 
 @Injectable()
-export class DockerConfigService extends ConfigFilePersister<OrganizerV1> {
+export class DockerConfigService extends ConfigFilePersister<DockerConfig> {
     constructor(configService: ConfigService) {
         super(configService);
     }
 
     enabled(): boolean {
         return FeatureFlags.ENABLE_NEXT_DOCKER_RELEASE;
     }
 
     configKey(): string {
-        return 'dockerOrganizer';
+        return 'docker';
     }
 
     fileName(): string {
-        return 'docker.organizer.json';
+        return 'docker.config.json';
     }
 
-    defaultConfig(): OrganizerV1 {
+    defaultConfig(): DockerConfig {
         return {
-            version: 1,
-            resources: {},
-            views: {
-                default: {
-                    id: DEFAULT_ORGANIZER_VIEW_ID,
-                    name: 'Default',
-                    root: DEFAULT_ORGANIZER_ROOT_ID,
-                    entries: {
-                        root: {
-                            type: 'folder',
-                            id: DEFAULT_ORGANIZER_ROOT_ID,
-                            name: 'Root',
-                            children: [],
-                        },
-                    },
-                },
-            },
+            updateCheckCronSchedule: CronExpression.EVERY_DAY_AT_6AM,
         };
     }
 
-    async validate(config: object): Promise<OrganizerV1> {
-        const organizer = await validateObject(OrganizerV1, config);
-        const { isValid, errors } = await validateOrganizerIntegrity(organizer);
-        if (!isValid) {
-            throw new AppError(`Docker organizer validation failed: ${JSON.stringify(errors, null, 2)}`);
+    async validate(config: object): Promise<DockerConfig> {
+        const dockerConfig = await validateObject(DockerConfig, config);
+        const cronExpression = validateCronExpression(dockerConfig.updateCheckCronSchedule);
+        if (!cronExpression.valid) {
+            throw new AppError(`Cron expression not supported: ${dockerConfig.updateCheckCronSchedule}`);
         }
-        return organizer;
+        return dockerConfig;
     }
 }
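As a concrete illustration (inferred from fileName() and defaultConfig() above, not copied from the diff), the persisted docker.config.json would contain something like { "updateCheckCronSchedule": "0 6 * * *" }, and validate() accepts any expression that the cron package's validateCronExpression considers valid:

// Sketch only — assumes a DockerConfigService instance obtained via Nest DI.
await dockerConfigService.validate({ updateCheckCronSchedule: '0 */6 * * *' }); // resolves with the config
await dockerConfigService.validate({ updateCheckCronSchedule: 'not-a-cron' }); // rejects with AppError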
@@ -0,0 +1,51 @@
import { Logger } from '@nestjs/common';
import { Mutation, Parent, ResolveField, Resolver } from '@nestjs/graphql';

import { Resource } from '@unraid/shared/graphql.model.js';
import { AuthAction, UsePermissions } from '@unraid/shared/use-permissions.directive.js';

import { AppError } from '@app/core/errors/app-error.js';
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';

@Resolver(() => DockerContainer)
export class DockerContainerResolver {
    private readonly logger = new Logger(DockerContainerResolver.name);
    constructor(private readonly dockerManifestService: DockerManifestService) {}

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => Boolean, { nullable: true })
    public async isUpdateAvailable(@Parent() container: DockerContainer) {
        try {
            return await this.dockerManifestService.isUpdateAvailableCached(container.image);
        } catch (error) {
            this.logger.error(error);
            throw new AppError('Failed to read cached update status. See graphql-api.log for details.');
        }
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.READ_ANY,
        resource: Resource.DOCKER,
    })
    @ResolveField(() => Boolean, { nullable: true })
    public async isRebuildReady(@Parent() container: DockerContainer) {
        return this.dockerManifestService.isRebuildReady(container.hostConfig?.networkMode);
    }

    @UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
    @UsePermissions({
        action: AuthAction.UPDATE_ANY,
        resource: Resource.DOCKER,
    })
    @Mutation(() => Boolean)
    public async refreshDockerDigests() {
        return this.dockerManifestService.refreshDigests();
    }
}
@@ -0,0 +1,62 @@
import { Injectable } from '@nestjs/common';

import { AsyncMutex } from '@unraid/shared/util/processing.js';

import { docker } from '@app/core/utils/index.js';
import {
    CachedStatusEntry,
    DockerPhpService,
} from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';

@Injectable()
export class DockerManifestService {
    constructor(private readonly dockerPhpService: DockerPhpService) {}

    private readonly refreshDigestsMutex = new AsyncMutex(() => {
        return this.dockerPhpService.refreshDigestsViaPhp();
    });

    /**
     * Recomputes local/remote docker container digests and writes them to /var/lib/docker/unraid-update-status.json
     * @param mutex - Optional mutex to use for the operation. If not provided, a default mutex will be used.
     * @param dockerUpdatePath - Optional path to the DockerUpdate.php file. If not provided, the default path will be used.
     * @returns True if the digests were refreshed, false if the operation failed
     */
    async refreshDigests(mutex = this.refreshDigestsMutex, dockerUpdatePath?: string) {
        return mutex.do(() => {
            return this.dockerPhpService.refreshDigestsViaPhp(dockerUpdatePath);
        });
    }

    /**
     * Checks if an update is available for a given container image.
     * @param imageRef - The image reference to check, e.g. "unraid/baseimage:latest". If no tag is provided, "latest" is assumed, following the webgui's implementation.
     * @param cacheData read from /var/lib/docker/unraid-update-status.json by default
     * @returns True if an update is available, false if not, or null if the status is unknown
     */
    async isUpdateAvailableCached(imageRef: string, cacheData?: Record<string, CachedStatusEntry>) {
        let taggedRef = imageRef;
        if (!taggedRef.includes(':')) taggedRef += ':latest';

        cacheData ??= await this.dockerPhpService.readCachedUpdateStatus();
        const containerData = cacheData[taggedRef];
        if (!containerData) return null;
        return containerData.status?.toLowerCase() === 'true';
    }

    /**
     * Checks if a container is rebuild ready.
     * @param networkMode - The network mode of the container, e.g. "container:unraid/baseimage:latest".
     * @returns True if the container is rebuild ready, false if not
     */
    async isRebuildReady(networkMode?: string) {
        if (!networkMode || !networkMode.startsWith('container:')) return false;
        const target = networkMode.slice('container:'.length);
        try {
            await docker.getContainer(target).inspect();
            return false;
        } catch {
            return true; // unresolved target -> ':???' equivalent
        }
    }
}
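To make the cache lookup concrete, here is a hedged sketch of the file shape the service expects (the real file is written by the webgui, not by this changeset) and how an untagged image reference is resolved:

// Hypothetical excerpt of /var/lib/docker/unraid-update-status.json, as parsed:
const cacheData = {
    'nginx:latest': { local: 'sha256:aaa', remote: 'sha256:bbb', status: 'true' },
    'redis:7': { local: 'sha256:ccc', remote: 'sha256:ccc', status: 'false' },
};

// 'nginx' is normalized to 'nginx:latest' before the lookup (manifestService is
// an injected DockerManifestService instance):
await manifestService.isUpdateAvailableCached('nginx', cacheData); // true
await manifestService.isUpdateAvailableCached('redis:7', cacheData); // false
await manifestService.isUpdateAvailableCached('ghost:5', cacheData); // null (status unknown)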
api/src/unraid-api/graph/resolvers/docker/docker-php.service.ts (new file)
@@ -0,0 +1,130 @@
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import { readFile } from 'fs/promises';
|
||||
|
||||
import { z } from 'zod';
|
||||
|
||||
import { phpLoader } from '@app/core/utils/plugins/php-loader.js';
|
||||
import {
|
||||
ExplicitStatusItem,
|
||||
UpdateStatus,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker-update-status.model.js';
|
||||
import { parseDockerPushCalls } from '@app/unraid-api/graph/resolvers/docker/utils/docker-push-parser.js';
|
||||
|
||||
type StatusItem = { name: string; updateStatus: 0 | 1 | 2 | 3 };
|
||||
|
||||
/**
|
||||
* These types reflect the structure of the /var/lib/docker/unraid-update-status.json file,
|
||||
* which is not controlled by the Unraid API.
|
||||
*/
|
||||
const CachedStatusEntrySchema = z.object({
|
||||
/** sha256 digest - "sha256:..." */
|
||||
local: z.string(),
|
||||
/** sha256 digest - "sha256:..." */
|
||||
remote: z.string(),
|
||||
/** whether update is available (true), not available (false), or unknown (null) */
|
||||
status: z.enum(['true', 'false']).nullable(),
|
||||
});
|
||||
const CachedStatusSchema = z.record(z.string(), CachedStatusEntrySchema);
|
||||
export type CachedStatusEntry = z.infer<typeof CachedStatusEntrySchema>;
|
||||
|
||||
@Injectable()
|
||||
export class DockerPhpService {
|
||||
private readonly logger = new Logger(DockerPhpService.name);
|
||||
constructor() {}
|
||||
|
||||
/**
|
||||
* Reads JSON from a file containing cached update status.
|
||||
* If the file does not exist, an empty object is returned.
|
||||
* @param cacheFile
|
||||
* @returns
|
||||
*/
|
||||
async readCachedUpdateStatus(
|
||||
cacheFile = '/var/lib/docker/unraid-update-status.json'
|
||||
): Promise<Record<string, CachedStatusEntry>> {
|
||||
try {
|
||||
const cache = await readFile(cacheFile, 'utf8');
|
||||
const cacheData = JSON.parse(cache);
|
||||
const { success, data } = CachedStatusSchema.safeParse(cacheData);
|
||||
if (success) return data;
|
||||
this.logger.warn(cacheData, 'Invalid cached update status');
|
||||
return {};
|
||||
} catch (error) {
|
||||
this.logger.warn(error, 'Failed to read cached update status');
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**----------------------
|
||||
* Refresh Container Digests
|
||||
*------------------------**/
|
||||
|
||||
/**
|
||||
* Recomputes local/remote digests by triggering `DockerTemplates->getAllInfo(true)` via DockerUpdate.php
|
||||
* @param dockerUpdatePath - Path to the DockerUpdate.php file
|
||||
* @returns True if the digests were refreshed, false if the file is not found or the operation failed
|
||||
*/
|
||||
async refreshDigestsViaPhp(
|
||||
dockerUpdatePath = '/usr/local/emhttp/plugins/dynamix.docker.manager/include/DockerUpdate.php'
|
||||
) {
|
||||
try {
|
||||
await phpLoader({
|
||||
file: dockerUpdatePath,
|
||||
method: 'GET',
|
||||
});
|
||||
return true;
|
||||
} catch {
|
||||
// ignore; offline may keep remote as 'undef'
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**----------------------
|
||||
* Parse Container Statuses
|
||||
*------------------------**/
|
||||
|
||||
private parseStatusesFromDockerPush(js: string): ExplicitStatusItem[] {
|
||||
const matches = parseDockerPushCalls(js);
|
||||
return matches.map(({ name, updateStatus }) => ({
|
||||
name,
|
||||
updateStatus: this.updateStatusToString(updateStatus as StatusItem['updateStatus']),
|
||||
}));
|
||||
}
|
||||
|
||||
private updateStatusToString(updateStatus: 0): UpdateStatus.UP_TO_DATE;
|
||||
private updateStatusToString(updateStatus: 1): UpdateStatus.UPDATE_AVAILABLE;
|
||||
private updateStatusToString(updateStatus: 2): UpdateStatus.REBUILD_READY;
|
||||
private updateStatusToString(updateStatus: 3): UpdateStatus.UNKNOWN;
|
||||
// prettier-ignore
|
||||
private updateStatusToString(updateStatus: StatusItem['updateStatus']): ExplicitStatusItem['updateStatus'];
|
||||
private updateStatusToString(
|
||||
updateStatus: StatusItem['updateStatus']
|
||||
): ExplicitStatusItem['updateStatus'] {
|
||||
switch (updateStatus) {
|
||||
case 0:
|
||||
return UpdateStatus.UP_TO_DATE;
|
||||
case 1:
|
||||
return UpdateStatus.UPDATE_AVAILABLE;
|
||||
case 2:
|
||||
return UpdateStatus.REBUILD_READY;
|
||||
default:
|
||||
return UpdateStatus.UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the update statuses for all containers by triggering `DockerTemplates->getAllInfo(true)` via DockerContainers.php
|
||||
* @param dockerContainersPath - Path to the DockerContainers.php file
|
||||
* @returns The update statuses for all containers
|
||||
*/
|
||||
async getContainerUpdateStatuses(
|
||||
dockerContainersPath = '/usr/local/emhttp/plugins/dynamix.docker.manager/include/DockerContainers.php'
|
||||
): Promise<ExplicitStatusItem[]> {
|
||||
const stdout = await phpLoader({
|
||||
file: dockerContainersPath,
|
||||
method: 'GET',
|
||||
});
|
||||
const parts = stdout.split('\0'); // [html, "docker.push(...)", busyFlag]
|
||||
const js = parts[1] || '';
|
||||
return this.parseStatusesFromDockerPush(js);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,25 @@
import { Field, ObjectType, registerEnumType } from '@nestjs/graphql';

/**
 * Note that these values propagate down to API consumers, so be aware of breaking changes.
 */
export enum UpdateStatus {
    UP_TO_DATE = 'UP_TO_DATE',
    UPDATE_AVAILABLE = 'UPDATE_AVAILABLE',
    REBUILD_READY = 'REBUILD_READY',
    UNKNOWN = 'UNKNOWN',
}

registerEnumType(UpdateStatus, {
    name: 'UpdateStatus',
    description: 'Update status of a container.',
});

@ObjectType()
export class ExplicitStatusItem {
    @Field(() => String)
    name!: string;

    @Field(() => UpdateStatus)
    updateStatus!: UpdateStatus;
}
@@ -1,15 +1,16 @@
|
||||
import { ConfigService } from '@nestjs/config';
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
|
||||
import { DockerEventService } from '@app/unraid-api/graph/resolvers/docker/docker-event.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/docker-organizer.service.js';
|
||||
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
|
||||
import { DockerModule } from '@app/unraid-api/graph/resolvers/docker/docker.module.js';
|
||||
import { DockerMutationsResolver } from '@app/unraid-api/graph/resolvers/docker/docker.mutations.resolver.js';
|
||||
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
|
||||
|
||||
describe('DockerModule', () => {
|
||||
it('should compile the module', async () => {
|
||||
@@ -18,6 +19,8 @@ describe('DockerModule', () => {
|
||||
})
|
||||
.overrideProvider(DockerService)
|
||||
.useValue({ getDockerClient: vi.fn() })
|
||||
.overrideProvider(DockerOrganizerConfigService)
|
||||
.useValue({ getConfig: vi.fn() })
|
||||
.overrideProvider(DockerConfigService)
|
||||
.useValue({ getConfig: vi.fn() })
|
||||
.compile();
|
||||
@@ -61,6 +64,7 @@ describe('DockerModule', () => {
|
||||
DockerResolver,
|
||||
{ provide: DockerService, useValue: {} },
|
||||
{ provide: DockerOrganizerService, useValue: {} },
|
||||
{ provide: DockerPhpService, useValue: { getContainerUpdateStatuses: vi.fn() } },
|
||||
],
|
||||
}).compile();
|
||||
|
||||
|
||||
@@ -1,22 +1,36 @@
|
||||
import { Module } from '@nestjs/common';
|
||||
|
||||
import { JobModule } from '@app/unraid-api/cron/job.module.js';
|
||||
import { ContainerStatusJob } from '@app/unraid-api/graph/resolvers/docker/container-status.job.js';
|
||||
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/docker-organizer.service.js';
|
||||
import { DockerContainerResolver } from '@app/unraid-api/graph/resolvers/docker/docker-container.resolver.js';
|
||||
import { DockerManifestService } from '@app/unraid-api/graph/resolvers/docker/docker-manifest.service.js';
|
||||
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
|
||||
import { DockerMutationsResolver } from '@app/unraid-api/graph/resolvers/docker/docker.mutations.resolver.js';
|
||||
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
|
||||
|
||||
@Module({
|
||||
imports: [JobModule],
|
||||
providers: [
|
||||
// Services
|
||||
DockerService,
|
||||
DockerConfigService,
|
||||
DockerOrganizerConfigService,
|
||||
DockerOrganizerService,
|
||||
DockerManifestService,
|
||||
DockerPhpService,
|
||||
DockerConfigService,
|
||||
// DockerEventService,
|
||||
|
||||
// Jobs
|
||||
ContainerStatusJob,
|
||||
|
||||
// Resolvers
|
||||
DockerResolver,
|
||||
DockerMutationsResolver,
|
||||
DockerContainerResolver,
|
||||
],
|
||||
exports: [DockerService],
|
||||
})
|
||||
|
||||
@@ -3,10 +3,11 @@ import { Test } from '@nestjs/testing';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/docker-organizer.service.js';
|
||||
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
|
||||
import { ContainerState, DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { DockerResolver } from '@app/unraid-api/graph/resolvers/docker/docker.resolver.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
|
||||
|
||||
describe('DockerResolver', () => {
|
||||
let resolver: DockerResolver;
|
||||
@@ -26,7 +27,13 @@ describe('DockerResolver', () => {
|
||||
{
|
||||
provide: DockerOrganizerService,
|
||||
useValue: {
|
||||
getResolvedOrganizer: vi.fn(),
|
||||
resolveOrganizer: vi.fn(),
|
||||
},
|
||||
},
|
||||
{
|
||||
provide: DockerPhpService,
|
||||
useValue: {
|
||||
getContainerUpdateStatuses: vi.fn(),
|
||||
},
|
||||
},
|
||||
],
|
||||
|
||||
@@ -3,21 +3,25 @@ import { Args, Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql';
|
||||
import { AuthAction, Resource } from '@unraid/shared/graphql.model.js';
|
||||
import { UsePermissions } from '@unraid/shared/use-permissions.directive.js';
|
||||
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/docker-organizer.service.js';
|
||||
import { UseFeatureFlag } from '@app/unraid-api/decorators/use-feature-flag.decorator.js';
|
||||
import { DockerPhpService } from '@app/unraid-api/graph/resolvers/docker/docker-php.service.js';
|
||||
import { ExplicitStatusItem } from '@app/unraid-api/graph/resolvers/docker/docker-update-status.model.js';
|
||||
import {
|
||||
Docker,
|
||||
DockerContainer,
|
||||
DockerNetwork,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
|
||||
import { DEFAULT_ORGANIZER_ROOT_ID } from '@app/unraid-api/organizer/organizer.js';
|
||||
import { OrganizerV1, ResolvedOrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
|
||||
import { ResolvedOrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
|
||||
|
||||
@Resolver(() => Docker)
|
||||
export class DockerResolver {
|
||||
constructor(
|
||||
private readonly dockerService: DockerService,
|
||||
private readonly dockerOrganizerService: DockerOrganizerService
|
||||
private readonly dockerOrganizerService: DockerOrganizerService,
|
||||
private readonly dockerPhpService: DockerPhpService
|
||||
) {}
|
||||
|
||||
@UsePermissions({
|
||||
@@ -53,6 +57,7 @@ export class DockerResolver {
|
||||
return this.dockerService.getNetworks({ skipCache });
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
@@ -62,6 +67,7 @@ export class DockerResolver {
|
||||
return this.dockerOrganizerService.resolveOrganizer();
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
@@ -80,6 +86,7 @@ export class DockerResolver {
|
||||
return this.dockerOrganizerService.resolveOrganizer(organizer);
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
@@ -96,6 +103,7 @@ export class DockerResolver {
|
||||
return this.dockerOrganizerService.resolveOrganizer(organizer);
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
@@ -108,6 +116,7 @@ export class DockerResolver {
|
||||
return this.dockerOrganizerService.resolveOrganizer(organizer);
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.UPDATE_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
@@ -123,4 +132,14 @@ export class DockerResolver {
|
||||
});
|
||||
return this.dockerOrganizerService.resolveOrganizer(organizer);
|
||||
}
|
||||
|
||||
@UseFeatureFlag('ENABLE_NEXT_DOCKER_RELEASE')
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.DOCKER,
|
||||
})
|
||||
@ResolveField(() => [ExplicitStatusItem])
|
||||
public async containerUpdateStatuses() {
|
||||
return this.dockerPhpService.getContainerUpdateStatuses();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
import { Injectable } from '@nestjs/common';
|
||||
import { ConfigService } from '@nestjs/config';
|
||||
|
||||
import { ConfigFilePersister } from '@unraid/shared/services/config-file.js';
|
||||
|
||||
import { FeatureFlags } from '@app/consts.js';
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { validateObject } from '@app/unraid-api/graph/resolvers/validation.utils.js';
|
||||
import {
|
||||
DEFAULT_ORGANIZER_ROOT_ID,
|
||||
DEFAULT_ORGANIZER_VIEW_ID,
|
||||
} from '@app/unraid-api/organizer/organizer.js';
|
||||
import { OrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
|
||||
import { validateOrganizerIntegrity } from '@app/unraid-api/organizer/organizer.validation.js';
|
||||
|
||||
@Injectable()
|
||||
export class DockerOrganizerConfigService extends ConfigFilePersister<OrganizerV1> {
|
||||
constructor(configService: ConfigService) {
|
||||
super(configService);
|
||||
}
|
||||
|
||||
enabled(): boolean {
|
||||
return FeatureFlags.ENABLE_NEXT_DOCKER_RELEASE;
|
||||
}
|
||||
|
||||
configKey(): string {
|
||||
return 'dockerOrganizer';
|
||||
}
|
||||
|
||||
fileName(): string {
|
||||
return 'docker.organizer.json';
|
||||
}
|
||||
|
||||
defaultConfig(): OrganizerV1 {
|
||||
return {
|
||||
version: 1,
|
||||
resources: {},
|
||||
views: {
|
||||
default: {
|
||||
id: DEFAULT_ORGANIZER_VIEW_ID,
|
||||
name: 'Default',
|
||||
root: DEFAULT_ORGANIZER_ROOT_ID,
|
||||
entries: {
|
||||
root: {
|
||||
type: 'folder',
|
||||
id: DEFAULT_ORGANIZER_ROOT_ID,
|
||||
name: 'Root',
|
||||
children: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async validate(config: object): Promise<OrganizerV1> {
|
||||
const organizer = await validateObject(OrganizerV1, config);
|
||||
const { isValid, errors } = await validateOrganizerIntegrity(organizer);
|
||||
if (!isValid) {
|
||||
throw new AppError(`Docker organizer validation failed: ${JSON.stringify(errors, null, 2)}`);
|
||||
}
|
||||
return organizer;
|
||||
}
|
||||
}
|
||||
@@ -2,17 +2,17 @@ import { Test } from '@nestjs/testing';
|
||||
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
|
||||
import {
|
||||
containerToResource,
|
||||
DockerOrganizerService,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker-organizer.service.js';
|
||||
import {
|
||||
ContainerPortType,
|
||||
ContainerState,
|
||||
DockerContainer,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
|
||||
import {
|
||||
containerToResource,
|
||||
DockerOrganizerService,
|
||||
} from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer.service.js';
|
||||
import { OrganizerV1 } from '@app/unraid-api/organizer/organizer.model.js';
|
||||
|
||||
describe('containerToResource', () => {
|
||||
@@ -138,7 +138,7 @@ describe('containerToResource', () => {
|
||||
|
||||
describe('DockerOrganizerService', () => {
|
||||
let service: DockerOrganizerService;
|
||||
let configService: DockerConfigService;
|
||||
let configService: DockerOrganizerConfigService;
|
||||
let dockerService: DockerService;
|
||||
|
||||
const mockOrganizer: OrganizerV1 = {
|
||||
@@ -178,7 +178,7 @@ describe('DockerOrganizerService', () => {
|
||||
providers: [
|
||||
DockerOrganizerService,
|
||||
{
|
||||
provide: DockerConfigService,
|
||||
provide: DockerOrganizerConfigService,
|
||||
useValue: {
|
||||
getConfig: vi.fn().mockImplementation(() => structuredClone(mockOrganizer)),
|
||||
validate: vi.fn().mockImplementation((config) => Promise.resolve(config)),
|
||||
@@ -220,7 +220,7 @@ describe('DockerOrganizerService', () => {
|
||||
}).compile();
|
||||
|
||||
service = moduleRef.get<DockerOrganizerService>(DockerOrganizerService);
|
||||
configService = moduleRef.get<DockerConfigService>(DockerConfigService);
|
||||
configService = moduleRef.get<DockerOrganizerConfigService>(DockerOrganizerConfigService);
|
||||
dockerService = moduleRef.get<DockerService>(DockerService);
|
||||
});
|
||||
|
||||
@@ -3,9 +3,9 @@ import { Injectable, Logger } from '@nestjs/common';
|
||||
import type { ContainerListOptions } from 'dockerode';
|
||||
|
||||
import { AppError } from '@app/core/errors/app-error.js';
|
||||
import { DockerConfigService } from '@app/unraid-api/graph/resolvers/docker/docker-config.service.js';
|
||||
import { DockerContainer } from '@app/unraid-api/graph/resolvers/docker/docker.model.js';
|
||||
import { DockerService } from '@app/unraid-api/graph/resolvers/docker/docker.service.js';
|
||||
import { DockerOrganizerConfigService } from '@app/unraid-api/graph/resolvers/docker/organizer/docker-organizer-config.service.js';
|
||||
import {
|
||||
addMissingResourcesToView,
|
||||
createFolderInView,
|
||||
@@ -47,7 +47,7 @@ export function containerListToResourcesObject(containers: DockerContainer[]): O
|
||||
export class DockerOrganizerService {
|
||||
private readonly logger = new Logger(DockerOrganizerService.name);
|
||||
constructor(
|
||||
private readonly dockerConfigService: DockerConfigService,
|
||||
private readonly dockerConfigService: DockerOrganizerConfigService,
|
||||
private readonly dockerService: DockerService
|
||||
) {}
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import type { DockerPushMatch } from '@app/unraid-api/graph/resolvers/docker/utils/docker-push-parser.js';
|
||||
import { parseDockerPushCalls } from '@app/unraid-api/graph/resolvers/docker/utils/docker-push-parser.js';
|
||||
|
||||
describe('parseDockerPushCalls', () => {
|
||||
it('should extract name and update status from valid docker.push call', () => {
|
||||
const jsCode = "docker.push({name:'nginx',update:1});";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'nginx', updateStatus: 1 }]);
|
||||
});
|
||||
|
||||
it('should handle multiple docker.push calls in same string', () => {
|
||||
const jsCode = `
|
||||
docker.push({name:'nginx',update:1});
|
||||
docker.push({name:'mysql',update:0});
|
||||
docker.push({name:'redis',update:2});
|
||||
`;
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ name: 'nginx', updateStatus: 1 },
|
||||
{ name: 'mysql', updateStatus: 0 },
|
||||
{ name: 'redis', updateStatus: 2 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle docker.push calls with additional properties', () => {
|
||||
const jsCode =
|
||||
"docker.push({id:'123',name:'nginx',version:'latest',update:3,status:'running'});";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'nginx', updateStatus: 3 }]);
|
||||
});
|
||||
|
||||
it('should handle different property order', () => {
|
||||
const jsCode = "docker.push({update:2,name:'postgres',id:'456'});";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'postgres', updateStatus: 2 }]);
|
||||
});
|
||||
|
||||
it('should handle container names with special characters', () => {
|
||||
const jsCode = "docker.push({name:'my-app_v2.0',update:1});";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'my-app_v2.0', updateStatus: 1 }]);
|
||||
});
|
||||
|
||||
it('should handle whitespace variations', () => {
|
||||
const jsCode = "docker.push({ name: 'nginx' , update: 1 });";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'nginx', updateStatus: 1 }]);
|
||||
});
|
||||
|
||||
it('should return empty array for empty string', () => {
|
||||
const result = parseDockerPushCalls('');
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it('should return empty array when no docker.push calls found', () => {
|
||||
const jsCode = "console.log('no docker calls here');";
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it('should ignore malformed docker.push calls', () => {
|
||||
const jsCode = `
|
||||
docker.push({name:'valid',update:1});
|
||||
docker.push({name:'missing-update'});
|
||||
docker.push({update:2});
|
||||
docker.push({name:'another-valid',update:0});
|
||||
`;
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ name: 'valid', updateStatus: 1 },
|
||||
{ name: 'another-valid', updateStatus: 0 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle all valid update status values', () => {
|
||||
const jsCode = `
|
||||
docker.push({name:'container0',update:0});
|
||||
docker.push({name:'container1',update:1});
|
||||
docker.push({name:'container2',update:2});
|
||||
docker.push({name:'container3',update:3});
|
||||
`;
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ name: 'container0', updateStatus: 0 },
|
||||
{ name: 'container1', updateStatus: 1 },
|
||||
{ name: 'container2', updateStatus: 2 },
|
||||
{ name: 'container3', updateStatus: 3 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle real-world example with HTML and multiple containers', () => {
|
||||
const jsCode = `
|
||||
<div>some html</div>
|
||||
docker.push({id:'abc123',name:'plex',version:'1.32',update:1,autostart:true});
|
||||
docker.push({id:'def456',name:'nextcloud',version:'latest',update:0,ports:'80:8080'});
|
||||
<script>more content</script>
|
||||
docker.push({id:'ghi789',name:'homeassistant',update:2});
|
||||
`;
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ name: 'plex', updateStatus: 1 },
|
||||
{ name: 'nextcloud', updateStatus: 0 },
|
||||
{ name: 'homeassistant', updateStatus: 2 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle nested braces in other properties', () => {
|
||||
const jsCode = 'docker.push({config:\'{"nested":"value"}\',name:\'test\',update:1});';
|
||||
const result = parseDockerPushCalls(jsCode);
|
||||
|
||||
expect(result).toEqual([{ name: 'test', updateStatus: 1 }]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,24 @@
|
||||
export interface DockerPushMatch {
|
||||
name: string;
|
||||
updateStatus: number;
|
||||
}
|
||||
|
||||
export function parseDockerPushCalls(jsCode: string): DockerPushMatch[] {
|
||||
const dockerPushRegex = /docker\.push\(\{[^}]*(?:(?:[^{}]|{[^}]*})*)\}\);/g;
|
||||
const matches: DockerPushMatch[] = [];
|
||||
|
||||
for (const match of jsCode.matchAll(dockerPushRegex)) {
|
||||
const objectContent = match[0];
|
||||
|
||||
const nameMatch = objectContent.match(/name\s*:\s*'([^']+)'/);
|
||||
const updateMatch = objectContent.match(/update\s*:\s*(\d)/);
|
||||
|
||||
if (nameMatch && updateMatch) {
|
||||
const name = nameMatch[1];
|
||||
const updateStatus = Number(updateMatch[1]);
|
||||
matches.push({ name, updateStatus });
|
||||
}
|
||||
}
|
||||
|
||||
return matches;
|
||||
}
|
||||
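For context on the parser added above: it scans arbitrary page output for `docker.push({...})` calls and keeps only the `name`/`update` pair from each. A minimal usage sketch (the page body below is a made-up sample, not real webgui output) might look like:

```typescript
import { parseDockerPushCalls } from '@app/unraid-api/graph/resolvers/docker/utils/docker-push-parser.js';

// Hypothetical page body containing docker.push(...) calls mixed with other markup
const pageBody = `
    <div>status table</div>
    docker.push({id:'abc',name:'plex',version:'1.32',update:1});
    docker.push({id:'def',name:'nextcloud',update:0});
`;

// Build a name -> updateStatus lookup for reconciling with container data later
const updateStatusByName = new Map(
    parseDockerPushCalls(pageBody).map(({ name, updateStatus }) => [name, updateStatus] as const)
);

console.log(updateStatusByName.get('plex')); // 1
```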
@@ -1,4 +1,4 @@
|
||||
import { Injectable, Logger, OnModuleDestroy, OnModuleInit } from '@nestjs/common';
|
||||
import { Injectable, Logger, OnApplicationBootstrap, OnModuleDestroy } from '@nestjs/common';
|
||||
import crypto from 'crypto';
|
||||
import { ChildProcess } from 'node:child_process';
|
||||
import { mkdir, rm, writeFile } from 'node:fs/promises';
|
||||
@@ -7,6 +7,7 @@ import { dirname, join } from 'node:path';
|
||||
import { execa } from 'execa';
|
||||
import got, { HTTPError } from 'got';
|
||||
import pRetry from 'p-retry';
|
||||
import semver from 'semver';
|
||||
|
||||
import { sanitizeParams } from '@app/core/log.js';
|
||||
import { fileExists } from '@app/core/utils/files/file-exists.js';
|
||||
@@ -25,7 +26,7 @@ import {
|
||||
import { validateObject } from '@app/unraid-api/graph/resolvers/validation.utils.js';
|
||||
|
||||
@Injectable()
|
||||
export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
|
||||
export class RCloneApiService implements OnApplicationBootstrap, OnModuleDestroy {
|
||||
private isInitialized: boolean = false;
|
||||
private readonly logger = new Logger(RCloneApiService.name);
|
||||
private rcloneSocketPath: string = '';
|
||||
@@ -44,7 +45,7 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
|
||||
return this.isInitialized;
|
||||
}
|
||||
|
||||
async onModuleInit(): Promise<void> {
|
||||
async onApplicationBootstrap(): Promise<void> {
|
||||
// RClone startup disabled - early return
|
||||
if (ENVIRONMENT === 'production') {
|
||||
this.logger.debug('RClone startup is disabled');
|
||||
@@ -239,12 +240,41 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy {
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the RClone binary is available on the system
|
||||
* Checks if the RClone binary is available on the system and meets minimum version requirements
|
||||
*/
|
||||
private async checkRcloneBinaryExists(): Promise<boolean> {
|
||||
try {
|
||||
await execa('rclone', ['version']);
|
||||
this.logger.debug('RClone binary is available on the system.');
|
||||
const result = await execa('rclone', ['version']);
|
||||
const versionOutput = result.stdout.trim();
|
||||
|
||||
// Extract raw version string (format: "rclone vX.XX.X" or "rclone vX.XX.X-beta.X")
|
||||
const versionMatch = versionOutput.match(/rclone v([\d.\-\w]+)/);
|
||||
if (!versionMatch) {
|
||||
this.logger.error('Unable to parse RClone version from output');
|
||||
return false;
|
||||
}
|
||||
|
||||
const rawVersion = versionMatch[1];
|
||||
|
||||
// Use semver.coerce to get base semver from prerelease versions
|
||||
const coercedVersion = semver.coerce(rawVersion);
|
||||
if (!coercedVersion) {
|
||||
this.logger.error(`Failed to parse RClone version: raw="${rawVersion}"`);
|
||||
return false;
|
||||
}
|
||||
|
||||
const minimumVersion = '1.70.0';
|
||||
|
||||
if (!semver.gte(coercedVersion, minimumVersion)) {
|
||||
this.logger.error(
|
||||
`RClone version ${rawVersion} (coerced: ${coercedVersion}) is too old. Minimum required version is ${minimumVersion}`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
this.logger.debug(
|
||||
`RClone binary is available on the system (version ${rawVersion}, coerced: ${coercedVersion}).`
|
||||
);
|
||||
return true;
|
||||
} catch (error: unknown) {
|
||||
if (error instanceof Error && 'code' in error && error.code === 'ENOENT') {
|
||||
|
||||
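A note on the version gate above: `semver.gte` treats prerelease strings as lower than their release, so the service coerces the raw version first. A standalone sketch of that behaviour (outside the service, using the same `semver` package):

```typescript
import semver from 'semver';

// Prerelease output such as "rclone v1.70.0-beta.1" should still satisfy the 1.70.0 floor.
const rawVersion = '1.70.0-beta.1';
const coercedVersion = semver.coerce(rawVersion); // SemVer instance for "1.70.0"
const minimumVersion = '1.70.0';

if (coercedVersion && semver.gte(coercedVersion, minimumVersion)) {
    // Without coerce, semver.gte('1.70.0-beta.1', '1.70.0') would be false.
    console.log(`rclone ${rawVersion} is acceptable (coerced to ${coercedVersion.version})`);
}
```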
api/src/unraid-api/utils/feature-flag.helper.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
import { ForbiddenException } from '@nestjs/common';
|
||||
|
||||
/**
|
||||
* Checks if a feature flag is enabled and throws an exception if disabled.
|
||||
* Use this at the beginning of resolver methods for immediate feature flag checks.
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* @ResolveField(() => String)
|
||||
* async organizer() {
|
||||
* checkFeatureFlag(FeatureFlags, 'ENABLE_NEXT_DOCKER_RELEASE');
|
||||
* return this.dockerOrganizerService.resolveOrganizer();
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* @param flags - The feature flag object containing boolean/truthy values
|
||||
* @param key - The key within the feature flag object to check
|
||||
* @throws ForbiddenException if the feature flag is disabled
|
||||
*/
|
||||
export function checkFeatureFlag<T extends Record<string, any>>(flags: T, key: keyof T): void {
|
||||
const isEnabled = Boolean(flags[key]);
|
||||
|
||||
if (!isEnabled) {
|
||||
throw new ForbiddenException(
|
||||
`Feature "${String(key)}" is currently disabled. This functionality is not available at this time.`
|
||||
);
|
||||
}
|
||||
}
|
||||
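The helper above is deliberately decoupled from any particular flag registry: it treats the passed object as a plain bag of truthy values. A minimal standalone sketch, using a local flags object instead of the project's `FeatureFlags` constant:

```typescript
import { ForbiddenException } from '@nestjs/common';

import { checkFeatureFlag } from '@app/unraid-api/utils/feature-flag.helper.js';

// Local stand-in for the real FeatureFlags constant
const flags = { ENABLE_NEXT_DOCKER_RELEASE: false } as const;

try {
    checkFeatureFlag(flags, 'ENABLE_NEXT_DOCKER_RELEASE');
} catch (error) {
    if (error instanceof ForbiddenException) {
        // Feature "ENABLE_NEXT_DOCKER_RELEASE" is currently disabled. ...
        console.warn(error.message);
    }
}
```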
@@ -1,3 +1,6 @@
|
||||
import { existsSync, readFileSync } from 'node:fs';
|
||||
import { basename, join } from 'node:path';
|
||||
|
||||
import type { ViteUserConfig } from 'vitest/config';
|
||||
import { viteCommonjs } from '@originjs/vite-plugin-commonjs';
|
||||
import nodeResolve from '@rollup/plugin-node-resolve';
|
||||
@@ -70,6 +73,29 @@ export default defineConfig(({ mode }): ViteUserConfig => {
|
||||
},
|
||||
},
|
||||
}),
|
||||
// Copy PHP files to assets directory
|
||||
{
|
||||
name: 'copy-php-files',
|
||||
buildStart() {
|
||||
const phpFiles = ['src/core/utils/plugins/wrapper.php'];
|
||||
phpFiles.forEach((file) => this.addWatchFile(file));
|
||||
},
|
||||
async generateBundle() {
|
||||
const phpFiles = ['src/core/utils/plugins/wrapper.php'];
|
||||
phpFiles.forEach((file) => {
|
||||
if (!existsSync(file)) {
|
||||
this.warn(`[copy-php-files] PHP file ${file} does not exist`);
|
||||
return;
|
||||
}
|
||||
const content = readFileSync(file);
|
||||
this.emitFile({
|
||||
type: 'asset',
|
||||
fileName: join('assets', basename(file)),
|
||||
source: content,
|
||||
});
|
||||
});
|
||||
},
|
||||
},
|
||||
],
|
||||
define: {
|
||||
// Allows vite to preserve process.env variables and not hardcode them
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
{
|
||||
"name": "unraid-monorepo",
|
||||
"private": true,
|
||||
"version": "4.19.1",
|
||||
"version": "4.21.0",
|
||||
"scripts": {
|
||||
"build": "pnpm -r build",
|
||||
"build:watch": " pnpm -r --parallel build:watch",
|
||||
"build:watch": "pnpm -r --parallel --filter '!@unraid/ui' build:watch",
|
||||
"codegen": "pnpm -r codegen",
|
||||
"dev": "pnpm -r dev",
|
||||
"unraid:deploy": "pnpm -r unraid:deploy",
|
||||
|
||||
packages/unraid-shared/src/util/__tests__/processing.test.ts (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { AsyncMutex } from '../processing.js';
|
||||
|
||||
describe('AsyncMutex', () => {
|
||||
|
||||
describe('constructor-based operation', () => {
|
||||
it('should execute the default operation when do() is called without parameters', async () => {
|
||||
const mockOperation = vi.fn().mockResolvedValue('result');
|
||||
const mutex = new AsyncMutex(mockOperation);
|
||||
|
||||
const result = await mutex.do();
|
||||
|
||||
expect(result).toBe('result');
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should return the same promise when multiple calls are made concurrently', async () => {
|
||||
let resolveOperation: (value: string) => void;
|
||||
const operationPromise = new Promise<string>((resolve) => {
|
||||
resolveOperation = resolve;
|
||||
});
|
||||
const mockOperation = vi.fn().mockReturnValue(operationPromise);
|
||||
const mutex = new AsyncMutex(mockOperation);
|
||||
|
||||
const promise1 = mutex.do();
|
||||
const promise2 = mutex.do();
|
||||
const promise3 = mutex.do();
|
||||
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
expect(promise1).toBe(promise2);
|
||||
expect(promise2).toBe(promise3);
|
||||
|
||||
resolveOperation!('result');
|
||||
const [result1, result2, result3] = await Promise.all([promise1, promise2, promise3]);
|
||||
|
||||
expect(result1).toBe('result');
|
||||
expect(result2).toBe('result');
|
||||
expect(result3).toBe('result');
|
||||
});
|
||||
|
||||
it('should allow new operations after the first completes', async () => {
|
||||
const mockOperation = vi.fn()
|
||||
.mockResolvedValueOnce('first')
|
||||
.mockResolvedValueOnce('second');
|
||||
const mutex = new AsyncMutex(mockOperation);
|
||||
|
||||
const result1 = await mutex.do();
|
||||
expect(result1).toBe('first');
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
|
||||
const result2 = await mutex.do();
|
||||
expect(result2).toBe('second');
|
||||
expect(mockOperation).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should handle errors in the default operation', async () => {
|
||||
const error = new Error('Operation failed');
|
||||
const mockOperation = vi.fn().mockRejectedValue(error);
|
||||
const mutex = new AsyncMutex(mockOperation);
|
||||
|
||||
await expect(mutex.do()).rejects.toThrow(error);
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
|
||||
const secondOperation = vi.fn().mockResolvedValue('success');
|
||||
const mutex2 = new AsyncMutex(secondOperation);
|
||||
const result = await mutex2.do();
|
||||
expect(result).toBe('success');
|
||||
});
|
||||
});
|
||||
|
||||
describe('per-call operation', () => {
|
||||
it('should execute the provided operation', async () => {
|
||||
const mutex = new AsyncMutex<number>();
|
||||
const mockOperation = vi.fn().mockResolvedValue(42);
|
||||
|
||||
const result = await mutex.do(mockOperation);
|
||||
|
||||
expect(result).toBe(42);
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should return the same promise for concurrent calls with same operation type', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
let resolveOperation: (value: string) => void;
|
||||
const operationPromise = new Promise<string>((resolve) => {
|
||||
resolveOperation = resolve;
|
||||
});
|
||||
const mockOperation = vi.fn().mockReturnValue(operationPromise);
|
||||
|
||||
const promise1 = mutex.do(mockOperation);
|
||||
const promise2 = mutex.do(mockOperation);
|
||||
const promise3 = mutex.do(mockOperation);
|
||||
|
||||
expect(mockOperation).toHaveBeenCalledTimes(1);
|
||||
expect(promise1).toBe(promise2);
|
||||
expect(promise2).toBe(promise3);
|
||||
|
||||
resolveOperation!('shared-result');
|
||||
const [result1, result2, result3] = await Promise.all([promise1, promise2, promise3]);
|
||||
|
||||
expect(result1).toBe('shared-result');
|
||||
expect(result2).toBe('shared-result');
|
||||
expect(result3).toBe('shared-result');
|
||||
});
|
||||
|
||||
it('should allow different operations with different types', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
|
||||
const stringOp = vi.fn().mockResolvedValue('string-result');
|
||||
const numberOp = vi.fn().mockResolvedValue(123);
|
||||
|
||||
const stringResult = await mutex.do(stringOp);
|
||||
const numberResult = await mutex.do(numberOp);
|
||||
|
||||
expect(stringResult).toBe('string-result');
|
||||
expect(numberResult).toBe(123);
|
||||
expect(stringOp).toHaveBeenCalledTimes(1);
|
||||
expect(numberOp).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should handle errors in per-call operations', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
const error = new Error('Operation failed');
|
||||
const failingOp = vi.fn().mockRejectedValue(error);
|
||||
|
||||
await expect(mutex.do(failingOp)).rejects.toThrow(error);
|
||||
expect(failingOp).toHaveBeenCalledTimes(1);
|
||||
|
||||
const successOp = vi.fn().mockResolvedValue('success');
|
||||
const result = await mutex.do(successOp);
|
||||
expect(result).toBe('success');
|
||||
expect(successOp).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should throw an error when no operation is provided and no default is set', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
|
||||
await expect(mutex.do()).rejects.toThrow('No operation provided and no default operation set');
|
||||
});
|
||||
});
|
||||
|
||||
describe('mixed usage', () => {
|
||||
it('should allow overriding default operation with per-call operation', async () => {
|
||||
const defaultOp = vi.fn().mockResolvedValue('default');
|
||||
const mutex = new AsyncMutex(defaultOp);
|
||||
|
||||
const customOp = vi.fn().mockResolvedValue('custom');
|
||||
|
||||
const customResult = await mutex.do(customOp);
|
||||
expect(customResult).toBe('custom');
|
||||
expect(customOp).toHaveBeenCalledTimes(1);
|
||||
expect(defaultOp).not.toHaveBeenCalled();
|
||||
|
||||
const defaultResult = await mutex.do();
|
||||
expect(defaultResult).toBe('default');
|
||||
expect(defaultOp).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should share lock between default and custom operations', async () => {
|
||||
let resolveDefault: (value: string) => void;
|
||||
const defaultPromise = new Promise<string>((resolve) => {
|
||||
resolveDefault = resolve;
|
||||
});
|
||||
const defaultOp = vi.fn().mockReturnValue(defaultPromise);
|
||||
const mutex = new AsyncMutex(defaultOp);
|
||||
|
||||
const customOp = vi.fn().mockResolvedValue('custom');
|
||||
|
||||
const defaultCall = mutex.do();
|
||||
const customCall = mutex.do(customOp);
|
||||
|
||||
expect(defaultOp).toHaveBeenCalledTimes(1);
|
||||
expect(customOp).not.toHaveBeenCalled();
|
||||
expect(customCall).toBe(defaultCall);
|
||||
|
||||
resolveDefault!('default');
|
||||
const [defaultResult, customResult] = await Promise.all([defaultCall, customCall]);
|
||||
|
||||
expect(defaultResult).toBe('default');
|
||||
expect(customResult).toBe('default');
|
||||
});
|
||||
});
|
||||
|
||||
describe('timing and concurrency', () => {
|
||||
it('should handle sequential slow operations', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
let callCount = 0;
|
||||
|
||||
const slowOp = vi.fn().mockImplementation(() => {
|
||||
return new Promise((resolve) => {
|
||||
const currentCall = ++callCount;
|
||||
setTimeout(() => resolve(`result-${currentCall}`), 100);
|
||||
});
|
||||
});
|
||||
|
||||
const result1 = await mutex.do(slowOp);
|
||||
expect(result1).toBe('result-1');
|
||||
|
||||
const result2 = await mutex.do(slowOp);
|
||||
expect(result2).toBe('result-2');
|
||||
|
||||
expect(slowOp).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should deduplicate concurrent slow operations', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
let resolveOperation: (value: string) => void;
|
||||
|
||||
const slowOp = vi.fn().mockImplementation(() => {
|
||||
return new Promise<string>((resolve) => {
|
||||
resolveOperation = resolve;
|
||||
});
|
||||
});
|
||||
|
||||
const promises = [
|
||||
mutex.do(slowOp),
|
||||
mutex.do(slowOp),
|
||||
mutex.do(slowOp),
|
||||
mutex.do(slowOp),
|
||||
mutex.do(slowOp)
|
||||
];
|
||||
|
||||
expect(slowOp).toHaveBeenCalledTimes(1);
|
||||
|
||||
resolveOperation!('shared-slow-result');
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
expect(results).toEqual([
|
||||
'shared-slow-result',
|
||||
'shared-slow-result',
|
||||
'shared-slow-result',
|
||||
'shared-slow-result',
|
||||
'shared-slow-result'
|
||||
]);
|
||||
});
|
||||
|
||||
it('should properly clean up after operation completes', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
const op1 = vi.fn().mockResolvedValue('first');
|
||||
const op2 = vi.fn().mockResolvedValue('second');
|
||||
|
||||
await mutex.do(op1);
|
||||
expect(op1).toHaveBeenCalledTimes(1);
|
||||
|
||||
await mutex.do(op2);
|
||||
expect(op2).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should handle multiple rapid sequences of operations', async () => {
|
||||
const mutex = new AsyncMutex();
|
||||
const results: string[] = [];
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const op = vi.fn().mockResolvedValue(`result-${i}`);
|
||||
const result = await mutex.do(op);
|
||||
results.push(result as string);
|
||||
}
|
||||
|
||||
expect(results).toEqual(['result-0', 'result-1', 'result-2', 'result-3', 'result-4']);
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle operations that return undefined', async () => {
|
||||
const mutex = new AsyncMutex<undefined>();
|
||||
const op = vi.fn().mockResolvedValue(undefined);
|
||||
|
||||
const result = await mutex.do(op);
|
||||
expect(result).toBeUndefined();
|
||||
expect(op).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should handle operations that return null', async () => {
|
||||
const mutex = new AsyncMutex<null>();
|
||||
const op = vi.fn().mockResolvedValue(null);
|
||||
|
||||
const result = await mutex.do(op);
|
||||
expect(result).toBeNull();
|
||||
expect(op).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should handle nested operations correctly', async () => {
|
||||
const mutex = new AsyncMutex<string>();
|
||||
|
||||
const innerOp = vi.fn().mockResolvedValue('inner');
|
||||
const outerOp = vi.fn().mockImplementation(async () => {
|
||||
return 'outer';
|
||||
});
|
||||
|
||||
const result = await mutex.do(outerOp);
|
||||
expect(result).toBe('outer');
|
||||
expect(outerOp).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -31,3 +31,119 @@ export function makeSafeRunner(onError: (error: unknown) => void) {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
type AsyncOperation<T> = () => Promise<T>;
|
||||
|
||||
/**
|
||||
* A mutex for asynchronous operations that ensures only one operation runs at a time.
|
||||
*
|
||||
* When multiple callers attempt to execute operations simultaneously, they will all
|
||||
* receive the same promise from the currently running operation, effectively deduplicating
|
||||
* concurrent calls. This is useful for expensive operations like API calls, file operations,
|
||||
* or database queries that should not be executed multiple times concurrently.
|
||||
*
|
||||
* @template T - The default return type for operations when using a default operation
|
||||
*
|
||||
* @example
|
||||
* // Basic usage with explicit operations
|
||||
* const mutex = new AsyncMutex();
|
||||
*
|
||||
* // Multiple concurrent calls will deduplicate
|
||||
* const [result1, result2, result3] = await Promise.all([
|
||||
* mutex.do(() => fetch('/api/data')),
|
||||
* mutex.do(() => fetch('/api/data')), // Same request, will get same promise
|
||||
* mutex.do(() => fetch('/api/data')) // Same request, will get same promise
|
||||
* ]);
|
||||
* // Only one fetch actually happens
|
||||
*
|
||||
* @example
|
||||
* // Usage with a default operation
|
||||
* const dataLoader = new AsyncMutex(() =>
|
||||
* fetch('/api/expensive-data').then(res => res.json())
|
||||
* );
|
||||
*
|
||||
* const data1 = await dataLoader.do(); // Executes the fetch
|
||||
* const data2 = await dataLoader.do(); // If first promise is finished, a new fetch is executed
|
||||
*/
|
||||
export class AsyncMutex<T = unknown> {
|
||||
private currentOperation: Promise<T> | null = null;
|
||||
private defaultOperation?: AsyncOperation<T>;
|
||||
|
||||
/**
|
||||
* Creates a new AsyncMutex instance.
|
||||
*
|
||||
* @param operation - Optional default operation to execute when calling `do()` without arguments.
|
||||
* This is useful when you have a specific operation that should be deduplicated.
|
||||
*
|
||||
* @example
|
||||
* // Without default operation (shared mutex)
|
||||
* const mutex = new AsyncMutex();
|
||||
* const promise1 = mutex.do(() => someAsyncWork());
|
||||
* const promise2 = mutex.do(() => someOtherAsyncWork());
|
||||
*
|
||||
* // Both promises will be the same
|
||||
* expect(await promise1).toBe(await promise2);
|
||||
*
|
||||
* // After the first operation completes, new operations can run
|
||||
* await promise1;
|
||||
* const newPromise = mutex.do(() => someOtherAsyncWork()); // This will execute
|
||||
*
|
||||
* @example
|
||||
* // With default operation (deduplicating a specific operation)
|
||||
* const dataMutex = new AsyncMutex(() => loadExpensiveData());
|
||||
* await dataMutex.do(); // Executes loadExpensiveData()
|
||||
*/
|
||||
constructor(operation?: AsyncOperation<T>) {
|
||||
this.defaultOperation = operation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the provided operation, ensuring only one runs at a time.
|
||||
*
|
||||
* If an operation is already running, all subsequent calls will receive
|
||||
* the same promise from the currently running operation. This effectively
|
||||
* deduplicates concurrent calls to the same expensive operation.
|
||||
*
|
||||
* @param operation - Optional operation to execute. If not provided, uses the default operation.
|
||||
* @returns Promise that resolves with the result of the operation
|
||||
* @throws Error if no operation is provided and no default operation was set
|
||||
*
|
||||
* @example
|
||||
* const mutex = new AsyncMutex();
|
||||
*
|
||||
* // These will all return the same promise
|
||||
* const promise1 = mutex.do(() => fetch('/api/data'));
|
||||
* const promise2 = mutex.do(() => fetch('/api/other')); // Still gets first promise!
|
||||
* const promise3 = mutex.do(() => fetch('/api/another')); // Still gets first promise!
|
||||
*
|
||||
* // After the first operation completes, new operations can run
|
||||
* await promise1;
|
||||
* const newPromise = mutex.do(() => fetch('/api/new')); // This will execute
|
||||
*/
|
||||
do(operation?: AsyncOperation<T>): Promise<T> {
|
||||
if (this.currentOperation) {
|
||||
return this.currentOperation;
|
||||
}
|
||||
const op = operation ?? this.defaultOperation;
|
||||
if (!op) {
|
||||
return Promise.reject(
|
||||
new Error("No operation provided and no default operation set")
|
||||
);
|
||||
}
|
||||
const safeOp = () => {
|
||||
try {
|
||||
return op();
|
||||
} catch (error) {
|
||||
return Promise.reject(error);
|
||||
}
|
||||
};
|
||||
|
||||
const promise = safeOp().finally(() => {
|
||||
if (this.currentOperation === promise) {
|
||||
this.currentOperation = null;
|
||||
}
|
||||
});
|
||||
this.currentOperation = promise;
|
||||
return promise;
|
||||
}
|
||||
}
|
||||
|
||||
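One detail worth calling out in the implementation above: the `safeOp` wrapper converts a synchronous throw inside the operation into a rejected promise, so callers get a uniform promise-based error path. A short sketch (the relative import assumes the snippet sits next to `processing.ts`, as the test file does):

```typescript
import { AsyncMutex } from './processing.js';

const mutex = new AsyncMutex<string>();

// This operation throws synchronously rather than returning a rejected promise,
// but do() still surfaces the failure as a rejection thanks to safeOp().
mutex
    .do(() => {
        throw new Error('boom');
    })
    .catch((err) => console.error('caught:', (err as Error).message)); // caught: boom
```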
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@unraid/connect-plugin",
|
||||
"version": "4.19.1",
|
||||
"version": "4.21.0",
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"commander": "14.0.0",
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
This folder is PUBLIC. Please be aware of this when using it on Unraid OS.
|
||||
@@ -2,6 +2,33 @@
|
||||
# Unraid API Installation Verification Script
|
||||
# Checks that critical files are installed correctly
|
||||
|
||||
# Function to check for non-bash shells
|
||||
check_shell() {
|
||||
# This script runs with #!/bin/bash shebang
|
||||
# On Unraid, users may configure bash to load other shells through .bashrc
|
||||
# We check if the current process ($$) is actually bash, not another shell
|
||||
# Using $$ is correct here - we need to detect if THIS process is running the expected bash
|
||||
local current_shell
|
||||
current_shell=$(ps -o comm= -p $$)
|
||||
|
||||
# Remove any path and get just the shell name
|
||||
current_shell=$(basename "$current_shell")
|
||||
|
||||
if [[ "$current_shell" != "bash" ]]; then
|
||||
echo "Unsupported shell detected: $current_shell" >&2
|
||||
echo "Unraid scripts require bash but your system is configured to use $current_shell for scripts." >&2
|
||||
echo "This can cause infinite loops or unexpected behavior when Unraid scripts execute." >&2
|
||||
echo "Please configure $current_shell to only activate for interactive shells." >&2
|
||||
echo "Add this check to your ~/.bashrc or /etc/profile before starting $current_shell:" >&2
|
||||
echo " [[ \$- == *i* ]] && exec $current_shell" >&2
|
||||
echo "This ensures $current_shell only starts for interactive sessions, not scripts." >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run shell check first
|
||||
check_shell
|
||||
|
||||
echo "Performing comprehensive installation verification..."
|
||||
|
||||
# Define critical files to check (POSIX-compliant, no arrays)
|
||||
|
||||
pnpm-lock.yaml (generated, 18 lines changed)
@@ -164,8 +164,8 @@ importers:
|
||||
specifier: 1.0.2
|
||||
version: 1.0.2
|
||||
cron:
|
||||
specifier: 4.3.3
|
||||
version: 4.3.3
|
||||
specifier: 4.3.0
|
||||
version: 4.3.0
|
||||
cross-fetch:
|
||||
specifier: 4.1.0
|
||||
version: 4.1.0
|
||||
@@ -4201,9 +4201,6 @@ packages:
|
||||
'@types/luxon@3.6.2':
|
||||
resolution: {integrity: sha512-R/BdP7OxEMc44l2Ex5lSXHoIXTB2JLNa3y2QISIbr58U/YcsffyQrYW//hZSdrfxrjRZj3GcUoxMPGdO8gSYuw==}
|
||||
|
||||
'@types/luxon@3.7.1':
|
||||
resolution: {integrity: sha512-H3iskjFIAn5SlJU7OuxUmTEpebK6TKB8rxZShDslBMZJ5u9S//KM1sbdAisiSrqwLQncVjnpi2OK2J51h+4lsg==}
|
||||
|
||||
'@types/mdx@2.0.13':
|
||||
resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==}
|
||||
|
||||
@@ -5868,10 +5865,6 @@ packages:
|
||||
resolution: {integrity: sha512-ciiYNLfSlF9MrDqnbMdRWFiA6oizSF7kA1osPP9lRzNu0Uu+AWog1UKy7SkckiDY2irrNjeO6qLyKnXC8oxmrw==}
|
||||
engines: {node: '>=18.x'}
|
||||
|
||||
cron@4.3.3:
|
||||
resolution: {integrity: sha512-B/CJj5yL3sjtlun6RtYHvoSB26EmQ2NUmhq9ZiJSyKIM4K/fqfh9aelDFlIayD2YMeFZqWLi9hHV+c+pq2Djkw==}
|
||||
engines: {node: '>=18.x'}
|
||||
|
||||
croner@4.1.97:
|
||||
resolution: {integrity: sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==}
|
||||
|
||||
@@ -14806,8 +14799,6 @@ snapshots:
|
||||
|
||||
'@types/luxon@3.6.2': {}
|
||||
|
||||
'@types/luxon@3.7.1': {}
|
||||
|
||||
'@types/mdx@2.0.13': {}
|
||||
|
||||
'@types/methods@1.1.4': {}
|
||||
@@ -16673,11 +16664,6 @@ snapshots:
|
||||
'@types/luxon': 3.6.2
|
||||
luxon: 3.6.1
|
||||
|
||||
cron@4.3.3:
|
||||
dependencies:
|
||||
'@types/luxon': 3.7.1
|
||||
luxon: 3.7.1
|
||||
|
||||
croner@4.1.97: {}
|
||||
|
||||
cross-fetch@3.2.0:
|
||||
|
||||
readme.md (154 lines changed)
@@ -1,5 +1,6 @@
|
||||
<!-- Adapted from: https://github.com/othneildrew/Best-README-Template -->
|
||||
<!-- Improved compatibility of back to top link: See: https://github.com/othneildrew/Best-README-Template/pull/73 -->
|
||||
|
||||
<a id="readme-top"></a>
|
||||
|
||||
<!-- PROJECT SHIELDS -->
|
||||
@@ -91,9 +92,10 @@
|
||||
</details>
|
||||
|
||||
<!-- ABOUT THE PROJECT -->
|
||||
|
||||
## About The Project
|
||||
|
||||
<!-- [![Product Name Screen Shot][product-screenshot]](https://unraid.net)
|
||||
<!-- [![Product Name Screen Shot][product-screenshot]](https://unraid.net)
|
||||
|
||||
<p align="right">(<a href="#readme-top">back to top</a>)</p> -->
|
||||
|
||||
@@ -108,6 +110,7 @@
|
||||
<p align="right">(<a href="#readme-top">back to top</a>)</p>
|
||||
|
||||
<!-- GETTING STARTED -->
|
||||
|
||||
## Getting Started
|
||||
|
||||
This section will guide you through the steps necessary to get the monorepo projects running and
|
||||
@@ -117,13 +120,32 @@ communicating with each other.
|
||||
|
||||
Make sure the following software is installed before proceeding.
|
||||
|
||||
* Bash
|
||||
* Docker (for macOS folks, Orbstack works too)
|
||||
* [Node.js (v22)][Node-url]
|
||||
* [Just](https://github.com/casey/just) (optional)
|
||||
* libvirt (macOS folks can run `brew install libvirt`)
|
||||
* rclone (for development)
|
||||
* An [Unraid][Unraid-url] server for development
|
||||
- Bash
|
||||
- Docker (for macOS folks, Orbstack works too)
|
||||
- [Node.js (v22)][Node-url]
|
||||
- [pnpm](https://pnpm.io/) (v9.0+) - Install with `npm install -g pnpm`
|
||||
- [Just](https://github.com/casey/just) (optional)
|
||||
- libvirt (macOS folks can run `brew install libvirt`)
|
||||
- rclone (v1.70+) - **Important:** Version 1.70 or higher is required
|
||||
- jq - JSON processor for scripts
|
||||
- An [Unraid][Unraid-url] server for development
|
||||
|
||||
#### Ubuntu/WSL Users
|
||||
|
||||
For Ubuntu or WSL users, note that the default Ubuntu repositories may have older versions of rclone. You'll need rclone v1.70 or higher, which can be obtained from the [rclone releases page](https://github.com/rclone/rclone/releases).
|
||||
|
||||
#### Verify Prerequisites
|
||||
|
||||
After installation, verify your dependencies:
|
||||
|
||||
```sh
|
||||
# Verify installations and versions
|
||||
node --version # Should be v22.x
|
||||
pnpm --version # Should be v9.0+
|
||||
rclone version # Should be v1.70+
|
||||
jq --version # Should be installed
|
||||
docker --version # Should be installed
|
||||
```
|
||||
|
||||
#### Alternative: Using Nix Flake
|
||||
|
||||
@@ -154,25 +176,86 @@ Once you have your key pair, add your public SSH key to your Unraid server:
|
||||
cd api
|
||||
```
|
||||
|
||||
If using Nix, enter the development environment:
|
||||
|
||||
```sh
|
||||
nix develop
|
||||
```
|
||||
|
||||
2. Run the monorepo setup command.
|
||||
If using Nix, enter the development environment:
|
||||
|
||||
```sh
|
||||
pnpm install
|
||||
nix develop
|
||||
```
|
||||
|
||||
3. Run the build watcher to build the components and serve a local plugin file that can be installed on your Unraid server.
|
||||
2. Install dependencies and verify they're correctly installed:
|
||||
|
||||
```sh
|
||||
pnpm build:watch
|
||||
# Install all monorepo dependencies
|
||||
pnpm install
|
||||
|
||||
# The install script will automatically check for required dependencies
|
||||
# and their versions (rclone v1.70+, jq, pnpm, etc.)
|
||||
```
|
||||
|
||||
Navigate to Plugins->Install and install the local plugin file that is output to the console.
|
||||
3. Build the project:
|
||||
|
||||
```sh
|
||||
# Build individual packages first (from root directory)
|
||||
cd api && pnpm build && cd ..
|
||||
cd web && pnpm build && cd ..
|
||||
|
||||
# Then build the plugin if needed
|
||||
cd plugin && pnpm build && cd ..
|
||||
```
|
||||
|
||||
Note: The packages must be built in order as the plugin depends on the API build artifacts.
|
||||
|
||||
### Development Modes
|
||||
|
||||
The project supports two development modes:
|
||||
|
||||
#### Mode 1: Build Watcher with Local Plugin
|
||||
|
||||
This mode builds the plugin continuously and serves it locally for installation on your Unraid server:
|
||||
|
||||
```sh
|
||||
# From the root directory (api/)
|
||||
pnpm build:watch
|
||||
```
|
||||
|
||||
This command will output a local plugin URL that you can install on your Unraid server by navigating to Plugins → Install Plugin. Be aware it will take a *while* to build the first time.
|
||||
|
||||
#### Mode 2: Development Servers
|
||||
|
||||
For active development with hot-reload:
|
||||
|
||||
```sh
|
||||
# From the root directory - runs all dev servers concurrently
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Or run individual development servers:
|
||||
|
||||
```sh
|
||||
# API server (GraphQL backend at http://localhost:3001)
|
||||
cd api && pnpm dev
|
||||
|
||||
# Web interface (Nuxt frontend at http://localhost:3000)
|
||||
cd web && pnpm dev
|
||||
```
|
||||
|
||||
### Building the Full Plugin
|
||||
|
||||
To build the complete plugin package (.plg file):
|
||||
|
||||
```sh
|
||||
# From the root directory (api/)
|
||||
pnpm build:plugin
|
||||
|
||||
# The plugin will be created in plugin/dynamix.unraid.net.plg
|
||||
```
|
||||
|
||||
To deploy the plugin to your Unraid server:
|
||||
|
||||
```sh
|
||||
# Replace SERVER_IP with your Unraid server's IP address
|
||||
pnpm unraid:deploy SERVER_IP
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> View other workflows (local dev, etc.) in the [Developer Workflows](./api/docs/developer/workflows.md)
|
||||
@@ -180,6 +263,7 @@ Once you have your key pair, add your public SSH key to your Unraid server:
|
||||
<p align="right">(<a href="#readme-top">back to top</a>)</p>
|
||||
|
||||
<!-- USAGE EXAMPLES -->
|
||||
|
||||
## Usage
|
||||
|
||||
See [How to Use the API](./api/docs/public/how-to-use-the-api.md).
|
||||
@@ -201,6 +285,7 @@ See the [open issues](https://github.com/unraid/api/issues) for a full list of p
|
||||
<p align="right">(<a href="#readme-top">back to top</a>)</p> -->
|
||||
|
||||
<!-- CONTRIBUTING -->
|
||||
|
||||
## Contributing
|
||||
|
||||
For a complete guide on contributing to the project, including our code of conduct and development process, please see our [Contributing Guide](./CONTRIBUTING.md). Please read this before contributing.
|
||||
@@ -209,28 +294,30 @@ For a complete guide on contributing to the project, including our code of condu
|
||||
|
||||
For more information about development workflows, repository organization, and other technical details, please refer to the developer documentation inside this repository:
|
||||
|
||||
* [Development Guide](./api/docs/developer/development.md) - Setup, building, and debugging instructions
|
||||
* [Development Workflows](./api/docs/developer/workflows.md) - Detailed workflows for local development, building, and deployment
|
||||
* [Repository Organization](./api/docs/developer/repo-organization.md) - High-level architecture and project structure
|
||||
- [Development Guide](./api/docs/developer/development.md) - Setup, building, and debugging instructions
|
||||
- [Development Workflows](./api/docs/developer/workflows.md) - Detailed workflows for local development, building, and deployment
|
||||
- [Repository Organization](./api/docs/developer/repo-organization.md) - High-level architecture and project structure
|
||||
|
||||
### Work Intent Process
|
||||
|
||||
Before starting development work on this project, you must submit a Work Intent and have it approved by a core developer. This helps prevent duplicate work and ensures changes align with the project's goals.
|
||||
|
||||
1. **Create a Work Intent**
|
||||
* Go to [Issues → New Issue → Work Intent](https://github.com/unraid/api/issues/new?template=work_intent.md)
|
||||
* Fill out the brief template describing what you want to work on
|
||||
* The issue will be automatically labeled as `work-intent` and `unapproved`
|
||||
|
||||
- Go to [Issues → New Issue → Work Intent](https://github.com/unraid/api/issues/new?template=work_intent.md)
|
||||
- Fill out the brief template describing what you want to work on
|
||||
- The issue will be automatically labeled as `work-intent` and `unapproved`
|
||||
|
||||
2. **Wait for Approval**
|
||||
* A core developer will review your Work Intent
|
||||
* They may ask questions or suggest changes
|
||||
* Once approved, the `unapproved` label will be removed
|
||||
|
||||
- A core developer will review your Work Intent
|
||||
- They may ask questions or suggest changes
|
||||
- Once approved, the `unapproved` label will be removed
|
||||
|
||||
3. **Begin Development**
|
||||
* Only start coding after your Work Intent is approved
|
||||
* Follow the approach outlined in your approved Work Intent
|
||||
* Reference the Work Intent in your future PR
|
||||
- Only start coding after your Work Intent is approved
|
||||
- Follow the approach outlined in your approved Work Intent
|
||||
- Reference the Work Intent in your future PR
|
||||
|
||||
---
|
||||
|
||||
@@ -254,14 +341,16 @@ Don't forget to give the project a star! Thanks again!
|
||||
</a>
|
||||
|
||||
<!-- Community & Acknowledgements -->
|
||||
|
||||
## Community
|
||||
|
||||
🌐 [Forums](https://forums.unraid.net/)
|
||||
💬 [Discord](https://discord.unraid.net/)
|
||||
💬 [Discord](https://discord.unraid.net/)
|
||||
|
||||
<p align="right">(<a href="#readme-top">back to top</a>)</p>
|
||||
|
||||
<!-- CONTACT -->
|
||||
|
||||
## Contact
|
||||
|
||||
[@UnraidOfficial](https://twitter.com/UnraidOfficial) - <contact@unraid.net>
|
||||
@@ -272,6 +361,7 @@ Project Link: [https://github.com/unraid/api](https://github.com/unraid/api)
|
||||
|
||||
<!-- MARKDOWN LINKS & IMAGES -->
|
||||
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
|
||||
|
||||
[contributors-shield]: https://img.shields.io/github/contributors/unraid/api.svg?style=for-the-badge
|
||||
[contributors-url]: https://github.com/unraid/api/graphs/contributors
|
||||
[forks-shield]: https://img.shields.io/github/forks/unraid/api.svg?style=for-the-badge
|
||||
|
||||
scripts/cleanup-old-builds.sh (new executable file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Script to clean up old timestamped builds from Cloudflare R2
|
||||
# This will remove old .txz files with the pattern dynamix.unraid.net-YYYY.MM.DD.HHMM.txz
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
echo -e "${YELLOW}🧹 Cloudflare Old Build Cleanup Script${NC}"
|
||||
echo "This will delete old timestamped .txz builds from the preview bucket"
|
||||
echo ""
|
||||
|
||||
# Check for required environment variables
|
||||
if [ -z "$CF_ACCESS_KEY_ID" ] || [ -z "$CF_SECRET_ACCESS_KEY" ] || [ -z "$CF_ENDPOINT" ] || [ -z "$CF_BUCKET_PREVIEW" ]; then
|
||||
echo -e "${RED}❌ Error: Missing required environment variables${NC}"
|
||||
echo "Please set the following environment variables:"
|
||||
echo " - CF_ACCESS_KEY_ID"
|
||||
echo " - CF_SECRET_ACCESS_KEY"
|
||||
echo " - CF_ENDPOINT"
|
||||
echo " - CF_BUCKET_PREVIEW"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Configure AWS CLI for Cloudflare R2
|
||||
export AWS_ACCESS_KEY_ID="$CF_ACCESS_KEY_ID"
|
||||
export AWS_SECRET_ACCESS_KEY="$CF_SECRET_ACCESS_KEY"
|
||||
export AWS_DEFAULT_REGION="auto"
|
||||
|
||||
echo "Endpoint: $CF_ENDPOINT"
|
||||
echo "Bucket: $CF_BUCKET_PREVIEW"
|
||||
echo ""
|
||||
|
||||
# Optional: specify number of days to keep (default: 7)
|
||||
KEEP_DAYS=${1:-7}
|
||||
echo -e "${BLUE}Keeping builds from the last ${KEEP_DAYS} days${NC}"
|
||||
echo ""
|
||||
|
||||
# Calculate cutoff date
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
# macOS
|
||||
CUTOFF_DATE=$(date -v -${KEEP_DAYS}d +"%Y.%m.%d")
|
||||
else
|
||||
# Linux
|
||||
CUTOFF_DATE=$(date -d "${KEEP_DAYS} days ago" +"%Y.%m.%d")
|
||||
fi
|
||||
|
||||
echo "Cutoff date: ${CUTOFF_DATE} (will delete builds older than this)"
|
||||
echo ""
|
||||
|
||||
# List all timestamped TXZ files in the unraid-api directory
|
||||
echo -e "${YELLOW}📋 Scanning for old builds...${NC}"
|
||||
|
||||
# Get all .txz files matching the pattern
|
||||
ALL_FILES=$(aws s3 ls "s3://${CF_BUCKET_PREVIEW}/unraid-api/" --endpoint-url "$CF_ENDPOINT" --recursive | \
|
||||
grep -E "dynamix\.unraid\.net-[0-9]{4}\.[0-9]{2}\.[0-9]{2}\.[0-9]{4}\.txz" | \
|
||||
awk '{print $4}' || true)
|
||||
|
||||
if [ -z "$ALL_FILES" ]; then
|
||||
echo -e "${GREEN}✅ No timestamped builds found${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Filter files older than cutoff
|
||||
OLD_FILES=""
|
||||
KEEP_FILES=""
|
||||
TOTAL_COUNT=0
|
||||
OLD_COUNT=0
|
||||
|
||||
while IFS= read -r file; do
|
||||
((TOTAL_COUNT++))
|
||||
# Extract date from filename (format: YYYY.MM.DD.HHMM)
|
||||
if [[ $file =~ ([0-9]{4}\.[0-9]{2}\.[0-9]{2})\.[0-9]{4}\.txz ]]; then
|
||||
FILE_DATE="${BASH_REMATCH[1]}"
|
||||
|
||||
# Compare dates (string comparison works for YYYY.MM.DD format)
|
||||
if [[ "$FILE_DATE" < "$CUTOFF_DATE" ]]; then
|
||||
OLD_FILES="${OLD_FILES}${file}\n"
|
||||
((OLD_COUNT++))
|
||||
else
|
||||
KEEP_FILES="${KEEP_FILES}${file}\n"
|
||||
fi
|
||||
fi
|
||||
done <<< "$ALL_FILES"
|
||||
|
||||
echo "Found ${TOTAL_COUNT} total timestamped builds"
|
||||
echo "Will delete ${OLD_COUNT} old builds"
|
||||
echo "Will keep $((TOTAL_COUNT - OLD_COUNT)) recent builds"
|
||||
echo ""
|
||||
|
||||
if [ "$OLD_COUNT" -eq 0 ]; then
|
||||
echo -e "${GREEN}✅ No old builds to delete${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Show sample of files to be deleted
|
||||
echo -e "${YELLOW}Sample of files to be deleted:${NC}"
|
||||
echo -e "$OLD_FILES" | head -5
|
||||
if [ "$OLD_COUNT" -gt 5 ]; then
|
||||
echo "... and $((OLD_COUNT - 5)) more"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Confirmation prompt
|
||||
read -p "Are you sure you want to delete these ${OLD_COUNT} old builds? (yes/no): " -r
|
||||
echo ""
|
||||
|
||||
if [[ ! $REPLY =~ ^[Yy]es$ ]]; then
|
||||
echo -e "${YELLOW}⚠️ Cleanup cancelled${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Delete old files
|
||||
DELETED=0
|
||||
FAILED=0
|
||||
|
||||
echo -e "${YELLOW}🗑️ Deleting old builds...${NC}"
|
||||
while IFS= read -r file; do
|
||||
if [ -n "$file" ]; then
|
||||
echo -n "Deleting $(basename "$file")... "
|
||||
|
||||
if aws s3 rm "s3://${CF_BUCKET_PREVIEW}/${file}" \
|
||||
--endpoint-url "$CF_ENDPOINT" \
|
||||
>/dev/null 2>&1; then
|
||||
echo -e "${GREEN}✓${NC}"
|
||||
((DELETED++))
|
||||
else
|
||||
echo -e "${RED}✗${NC}"
|
||||
((FAILED++))
|
||||
fi
|
||||
fi
|
||||
done <<< "$(echo -e "$OLD_FILES")"
|
||||
|
||||
echo ""
|
||||
echo -e "${GREEN}🎉 Cleanup complete!${NC}"
|
||||
echo " - Deleted: $DELETED old build(s)"
|
||||
if [ $FAILED -gt 0 ]; then
|
||||
echo -e " - Failed: ${RED}$FAILED${NC} build(s)"
|
||||
fi
|
||||
|
||||
# Show remaining recent builds
|
||||
echo ""
|
||||
echo -e "${BLUE}📦 Recent builds kept:${NC}"
|
||||
echo -e "$KEEP_FILES" | head -5
|
||||
KEEP_COUNT=$(echo -e "$KEEP_FILES" | grep -c . || echo 0)
|
||||
if [ "$KEEP_COUNT" -gt 5 ]; then
|
||||
echo "... and $((KEEP_COUNT - 5)) more"
|
||||
fi
|
||||
scripts/cleanup-pr-builds.sh (new executable file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Script to delete all PR builds from Cloudflare R2
|
||||
# This will remove all artifacts under unraid-api/tag/PR* paths
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
echo -e "${YELLOW}🧹 Cloudflare PR Build Cleanup Script${NC}"
|
||||
echo "This will delete all PR builds from the preview bucket"
|
||||
echo ""
|
||||
|
||||
# Check for required environment variables
|
||||
if [ -z "$CF_ACCESS_KEY_ID" ] || [ -z "$CF_SECRET_ACCESS_KEY" ] || [ -z "$CF_ENDPOINT" ] || [ -z "$CF_BUCKET_PREVIEW" ]; then
|
||||
echo -e "${RED}❌ Error: Missing required environment variables${NC}"
|
||||
echo "Please set the following environment variables:"
|
||||
echo " - CF_ACCESS_KEY_ID"
|
||||
echo " - CF_SECRET_ACCESS_KEY"
|
||||
echo " - CF_ENDPOINT"
|
||||
echo " - CF_BUCKET_PREVIEW"
|
||||
echo ""
|
||||
echo "You can source them from your .env file or export them manually:"
|
||||
echo " export CF_ACCESS_KEY_ID='your-key-id'"
|
||||
echo " export CF_SECRET_ACCESS_KEY='your-secret-key'"
|
||||
echo " export CF_ENDPOINT='your-endpoint'"
|
||||
echo " export CF_BUCKET_PREVIEW='your-bucket'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Configure AWS CLI for Cloudflare R2
|
||||
export AWS_ACCESS_KEY_ID="$CF_ACCESS_KEY_ID"
|
||||
export AWS_SECRET_ACCESS_KEY="$CF_SECRET_ACCESS_KEY"
|
||||
export AWS_DEFAULT_REGION="auto"
|
||||
|
||||
echo "Endpoint: $CF_ENDPOINT"
|
||||
echo "Bucket: $CF_BUCKET_PREVIEW"
|
||||
echo ""
|
||||
|
||||
# List all PR directories
|
||||
echo -e "${YELLOW}📋 Listing all PR builds...${NC}"
|
||||
PR_DIRS=$(aws s3 ls "s3://${CF_BUCKET_PREVIEW}/unraid-api/tag/" --endpoint-url "$CF_ENDPOINT" 2>/dev/null | grep "PRE PR" | awk '{print $2}' || true)
|
||||
|
||||
if [ -z "$PR_DIRS" ]; then
|
||||
echo -e "${GREEN}✅ No PR builds found to clean up${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Count PR builds
|
||||
PR_COUNT=$(echo "$PR_DIRS" | wc -l | tr -d ' ')
|
||||
echo -e "Found ${YELLOW}${PR_COUNT}${NC} PR build(s):"
|
||||
echo "$PR_DIRS"
|
||||
echo ""
|
||||
|
||||
# Confirmation prompt
|
||||
read -p "Are you sure you want to delete ALL these PR builds? (yes/no): " -r
|
||||
echo ""
|
||||
|
||||
if [[ ! $REPLY =~ ^[Yy]es$ ]]; then
|
||||
echo -e "${YELLOW}⚠️ Cleanup cancelled${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Delete each PR directory
|
||||
DELETED=0
|
||||
FAILED=0
|
||||
|
||||
for PR_DIR in $PR_DIRS; do
|
||||
PR_NUM=${PR_DIR%/} # Remove trailing slash
|
||||
echo -n "Deleting $PR_NUM... "
|
||||
|
||||
if aws s3 rm "s3://${CF_BUCKET_PREVIEW}/unraid-api/tag/${PR_NUM}" \
|
||||
--recursive \
|
||||
--endpoint-url "$CF_ENDPOINT" \
|
||||
>/dev/null 2>&1; then
|
||||
echo -e "${GREEN}✓${NC}"
|
||||
((DELETED++))
|
||||
else
|
||||
echo -e "${RED}✗${NC}"
|
||||
((FAILED++))
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo -e "${GREEN}🎉 Cleanup complete!${NC}"
|
||||
echo " - Deleted: $DELETED PR build(s)"
|
||||
if [ $FAILED -gt 0 ]; then
|
||||
echo -e " - Failed: ${RED}$FAILED${NC} PR build(s)"
|
||||
fi
|
||||
|
||||
# Optional: List remaining items to verify
|
||||
echo ""
|
||||
echo -e "${YELLOW}📋 Verifying cleanup...${NC}"
|
||||
REMAINING=$(aws s3 ls "s3://${CF_BUCKET_PREVIEW}/unraid-api/tag/" --endpoint-url "$CF_ENDPOINT" 2>/dev/null | grep -c "PRE PR" || true)
|
||||
# Ensure REMAINING is a valid number
|
||||
REMAINING=${REMAINING:-0}
|
||||
echo "Remaining PR builds: $REMAINING"
|
||||
|
||||
if [ "$REMAINING" -eq 0 ]; then
|
||||
echo -e "${GREEN}✅ All PR builds successfully removed${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Some PR builds may still exist${NC}"
|
||||
fi
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@unraid/ui",
|
||||
"version": "4.19.1",
|
||||
"version": "4.21.0",
|
||||
"private": true,
|
||||
"license": "GPL-2.0-or-later",
|
||||
"type": "module",
|
||||
|
||||
@@ -322,8 +322,8 @@ describe('UserProfile.ce.vue', () => {
|
||||
themeStore.theme!.descriptionShow = true;
|
||||
await wrapper.vm.$nextTick();
|
||||
|
||||
// Look for the description in a span element
|
||||
let descriptionElement = wrapper.find('span.text-center.md\\:text-right');
|
||||
// Look for the description in a span element with v-html directive
|
||||
let descriptionElement = wrapper.find('span.hidden.text-center.text-base');
|
||||
expect(descriptionElement.exists()).toBe(true);
|
||||
expect(descriptionElement.html()).toContain(initialServerData.description);
|
||||
|
||||
@@ -331,13 +331,13 @@ describe('UserProfile.ce.vue', () => {
|
||||
await wrapper.vm.$nextTick();
|
||||
|
||||
// When descriptionShow is false, the element should not exist
|
||||
descriptionElement = wrapper.find('span.text-center.md\\:text-right');
|
||||
descriptionElement = wrapper.find('span.hidden.text-center.text-base');
|
||||
expect(descriptionElement.exists()).toBe(false);
|
||||
|
||||
themeStore.theme!.descriptionShow = true;
|
||||
await wrapper.vm.$nextTick();
|
||||
|
||||
descriptionElement = wrapper.find('span.text-center.md\\:text-right');
|
||||
descriptionElement = wrapper.find('span.hidden.text-center.text-base');
|
||||
expect(descriptionElement.exists()).toBe(true);
|
||||
expect(descriptionElement.html()).toContain(initialServerData.description);
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@unraid/web",
|
||||
"version": "4.19.1",
|
||||
"version": "4.21.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"license": "GPL-2.0-or-later",
|
||||
|
||||
@@ -146,4 +146,9 @@
|
||||
/* Style for Unraid progress frame */
|
||||
iframe#progressFrame {
|
||||
background-color: var(--background-color);
|
||||
}
|
||||
|
||||
/* Global input text color when SSO button is present (for login page) */
|
||||
body:has(unraid-sso-button) input {
|
||||
color: #1b1b1b !important;
|
||||
}
|
||||
@@ -158,7 +158,7 @@ const updateOsStatus = computed(() => {
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<div class="mt-4 ml-4 flex flex-col gap-y-2">
|
||||
<div class="mt-4 ml-4 flex max-w-fit flex-col gap-y-2">
|
||||
<a
|
||||
:href="unraidLogoHeaderLink.href"
|
||||
:title="unraidLogoHeaderLink.title"
|
||||
|
||||
@@ -106,14 +106,14 @@ onMounted(() => {
|
||||
|
||||
<div class="relative z-10 flex h-full flex-row items-center justify-end gap-x-2">
|
||||
<div
|
||||
class="text-header-text-primary relative flex flex-col-reverse items-center border-0 text-base md:flex-row md:items-center"
|
||||
class="text-header-text-primary relative flex flex-col-reverse items-center border-0 text-base md:!flex-row md:!items-center"
|
||||
>
|
||||
<template v-if="description && theme?.descriptionShow">
|
||||
<span
|
||||
class="hidden text-center text-base md:inline-flex md:items-center md:text-right"
|
||||
class="hidden text-center text-base md:!inline-flex md:!items-center md:!text-right"
|
||||
v-html="description"
|
||||
/>
|
||||
<span class="text-header-text-secondary hidden px-2 md:inline-flex md:items-center"
|
||||
<span class="text-header-text-secondary hidden px-2 md:!inline-flex md:!items-center"
|
||||
>•</span
|
||||
>
|
||||
</template>
|
||||
|
||||