From 333093a20d12bc44c0759979d250768703457dd6 Mon Sep 17 00:00:00 2001 From: Eli Bosley Date: Sat, 24 May 2025 09:56:10 -0400 Subject: [PATCH] feat: setup initial backup stats --- .bivvy/abcd-climb.md | 8 + .bivvy/abcd-moves.json | 21 + .bivvy/k8P2-climb.md | 139 + .bivvy/k8P2-moves.json | 53 + .bivvy/x7K9-climb.md | 184 ++ .bivvy/x7K9-moves.json | 63 + .cursor/rules/bivvy.mdc | 181 ++ api/.env.development | 3 +- api/dev/api/backup/backup-jobs.json | 14 + api/generated-schema.graphql | 133 +- api/src/core/pubsub.ts | 1 + api/src/store/modules/paths.ts | 1 + .../resolvers/backup/backup-config.service.ts | 16 +- .../backup/backup-mutations.resolver.ts | 177 ++ .../graph/resolvers/backup/backup.model.ts | 38 +- .../graph/resolvers/backup/backup.module.ts | 3 +- .../graph/resolvers/backup/backup.resolver.ts | 184 +- .../unraid-api/graph/resolvers/base.model.ts | 1 + .../resolvers/mutation/mutation.model.ts | 8 + .../resolvers/mutation/mutation.resolver.ts | 6 + .../rclone/Remote Control _ API.html | 2626 +++++++++++++++++ .../resolvers/rclone/rclone-api.service.ts | 287 +- .../graph/resolvers/rclone/rclone.model.ts | 153 + unraid-ui/src/components.ts | 1 - .../form/lightswitch/Lightswitch.vue | 67 - .../src/components/form/lightswitch/index.ts | 1 - web/components/Backup/BackupJobConfig.vue | 308 +- web/components/Backup/BackupOverview.vue | 140 +- web/components/Backup/backup-jobs.query.ts | 163 +- web/composables/gql/gql.ts | 54 +- web/composables/gql/graphql.ts | 188 +- 31 files changed, 4656 insertions(+), 566 deletions(-) create mode 100644 .bivvy/abcd-climb.md create mode 100644 .bivvy/abcd-moves.json create mode 100644 .bivvy/k8P2-climb.md create mode 100644 .bivvy/k8P2-moves.json create mode 100644 .bivvy/x7K9-climb.md create mode 100644 .bivvy/x7K9-moves.json create mode 100644 .cursor/rules/bivvy.mdc create mode 100644 api/dev/api/backup/backup-jobs.json create mode 100644 api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts create mode 100644 api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html delete mode 100644 unraid-ui/src/components/form/lightswitch/Lightswitch.vue delete mode 100644 unraid-ui/src/components/form/lightswitch/index.ts diff --git a/.bivvy/abcd-climb.md b/.bivvy/abcd-climb.md new file mode 100644 index 000000000..72ca30a36 --- /dev/null +++ b/.bivvy/abcd-climb.md @@ -0,0 +1,8 @@ +--- +id: abcd +type: feature +description: This is an example Climb +--- +## Example PRD + +TODO \ No newline at end of file diff --git a/.bivvy/abcd-moves.json b/.bivvy/abcd-moves.json new file mode 100644 index 000000000..3f84260c1 --- /dev/null +++ b/.bivvy/abcd-moves.json @@ -0,0 +1,21 @@ +{ + "climb": "0000", + "moves": [ + { + "status": "complete", + "description": "install the dependencies", + "details": "install the deps listed as New Dependencies" + }, { + "status": "skip", + "description": "Write tests" + }, { + "status": "climbing", + "description": "Build the first part of the feature", + "rest": "true" + }, { + "status": "todo", + "description": "Build the last part of the feature", + "details": "After this, you'd ask the user if they want to return to write tests" + } + ] +} \ No newline at end of file diff --git a/.bivvy/k8P2-climb.md b/.bivvy/k8P2-climb.md new file mode 100644 index 000000000..10d3dd9dd --- /dev/null +++ b/.bivvy/k8P2-climb.md @@ -0,0 +1,139 @@ +**STARTFILE k8P2-climb.md** + +
+ k8P2 + bug + Fix RClone backup jobs not appearing in jobs list and missing status data +
+ None - this is a bug fix for existing functionality + None - working with existing backup service implementation + + - api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts (main RClone API service) + - api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts (backup mutations) + - web/components/Backup/BackupOverview.vue (frontend backup overview) + - web/components/Backup/backup-jobs.query.ts (GraphQL query for jobs) + - api/src/unraid-api/graph/resolvers/backup/backup-queries.resolver.ts (backup queries resolver) + + +## Problem Statement + +The newly implemented backup service has two critical issues: +1. **Jobs not appearing in non-system jobs list**: When users trigger backup jobs via the "Run Now" button in BackupOverview.vue, these jobs are not showing up in the jobs list query, even when `showSystemJobs: false` +2. **Missing job status data**: Jobs that are started don't return proper status information, making it impossible to track backup progress + +## Background + +This issue emerged immediately after implementing the new backup service. The backup functionality uses: +- RClone RC daemon for job execution via Unix socket +- GraphQL mutations for triggering backups (`triggerJob`, `initiateBackup`) +- Job grouping system with groups like `backup/manual` and `backup/${id}` +- Vue.js frontend with real-time job status monitoring + +## Root Cause Analysis Areas + +### 1. Job Group Classification +The current implementation sets job groups as: +- `backup/manual` for manual backups +- `backup/${id}` for configured job backups + +**Potential Issue**: The jobs query may be filtering these groups incorrectly, classifying user-initiated backups as "system jobs" + +### 2. RClone API Response Handling +**Potential Issue**: The `startBackup` method may not be properly handling or returning job metadata from RClone RC API responses + +### 3. Job Status Synchronization +**Potential Issue**: There may be a disconnect between job initiation and the jobs listing/status APIs + +### 4. Logging Deficiency +**Current Gap**: Insufficient logging around RClone API responses makes debugging difficult + +## Technical Requirements + +### Enhanced Logging +- Add comprehensive debug logging for all RClone API calls and responses +- Log job initiation parameters and returned job metadata +- Log job listing and filtering logic +- Add structured logging for job group classification + +### Job Classification Fix +- Ensure user-initiated backup jobs are properly classified as non-system jobs +- Review and fix job group filtering logic in the jobs query resolver +- Validate that job groups `backup/manual` and `backup/${id}` are treated as non-system + +### Status Data Flow +- Verify job ID propagation from RClone startBackup response +- Ensure job status API correctly retrieves and formats status data +- Fix any data transformation issues between RClone API and GraphQL responses + +### Data Model Consistency +- Ensure BackupJob GraphQL type includes all necessary fields (note: current linter error shows missing 'type' field) +- Verify job data structure consistency between API and frontend + +## Acceptance Criteria + +### Primary Fixes +1. **Jobs Visibility**: User-triggered backup jobs appear in the jobs list when `showSystemJobs: false` +2. **Status Data**: Job status data (progress, speed, ETA, etc.) is properly retrieved and displayed +3. **Job ID Tracking**: Job IDs are properly returned and can be used for status queries + +### Secondary Improvements +4. 
**Enhanced Logging**: Comprehensive logging for debugging RClone interactions +5. **Type Safety**: Fix TypeScript/linting errors in BackupOverview.vue +6. **System Jobs Investigation**: Document findings about excessive system jobs + +## Testing Approach + +### Manual Testing +1. Trigger backup via "Run Now" button in BackupOverview.vue +2. Verify job appears in running jobs list (with showSystemJobs: false) +3. Confirm job status data displays correctly (progress, speed, etc.) +4. Test both `triggerJob` (configured jobs) and `initiateBackup` (manual jobs) flows + +### API Testing +1. Verify RClone API responses contain expected job metadata +2. Test job listing API with various group filters +3. Validate job status API returns complete data + +### Edge Cases +1. Test behavior when RClone daemon is restarted +2. Test concurrent backup jobs +3. Test backup job cancellation/completion scenarios + +## Implementation Strategy + +### Phase 1: Debugging & Logging +- Add comprehensive logging to RClone API service +- Log all API responses and job metadata +- Add logging to job filtering logic + +### Phase 2: Job Classification Fix +- Fix job group filtering in backup queries resolver +- Ensure proper non-system job classification +- Test job visibility in frontend + +### Phase 3: Status Data Fix +- Fix job status data retrieval and formatting +- Ensure complete job metadata is available +- Fix TypeScript/GraphQL type issues + +### Phase 4: Validation & Testing +- Comprehensive testing of backup job lifecycle +- Validate all acceptance criteria +- Document system jobs investigation findings + +## Security Considerations +- Ensure logging doesn't expose sensitive backup configuration data +- Maintain proper authentication/authorization for backup operations +- Validate that job status queries don't leak information between users + +## Performance Considerations +- Ensure logging doesn't significantly impact performance +- Optimize job listing queries if necessary +- Consider caching strategies for frequently accessed job data + +## Known Constraints +- Must work with existing RClone RC daemon setup +- Cannot break existing backup functionality during fixes +- Must maintain backward compatibility with existing backup configurations +
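
## Reference Sketch: Job Classification

To make the classification rule described under Root Cause Analysis concrete, a minimal sketch follows. It assumes only that rclone job stats expose the `group` string assigned at job start (`backup/manual` or `backup/${id}`); the helper names are illustrative, not a prescribed implementation.

```typescript
// Minimal sketch, assuming job stats carry the rclone `group` assigned at start.
interface RCloneJobStats {
    group?: string;
}

// A job is user-facing iff its group sits in the "backup/" namespace,
// which covers both "backup/manual" and "backup/<configId>".
export function isUserBackupJob(stats: RCloneJobStats): boolean {
    return (stats.group ?? '').startsWith('backup/');
}

// Listing behaviour for the jobs query: hide everything else unless
// showSystemJobs is explicitly requested.
export function visibleJobs<T extends RCloneJobStats>(
    jobs: T[],
    showSystemJobs: boolean
): T[] {
    return showSystemJobs ? jobs : jobs.filter(isUserBackupJob);
}
```

Whatever the final shape, the predicate should live in one place so the jobs query, the status endpoint, and any UI filtering agree on what counts as a system job.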
+**ENDFILE** \ No newline at end of file diff --git a/.bivvy/k8P2-moves.json b/.bivvy/k8P2-moves.json new file mode 100644 index 000000000..9c3cf53a4 --- /dev/null +++ b/.bivvy/k8P2-moves.json @@ -0,0 +1,53 @@ +{ + "Climb": "k8P2", + "moves": [ + { + "status": "complete", + "description": "Investigate current backup jobs query resolver implementation", + "details": "Find and examine the backup-queries.resolver.ts to understand how jobs are currently filtered and what determines system vs non-system jobs" + }, + { + "status": "complete", + "description": "Add enhanced logging to RClone API service", + "details": "Add comprehensive debug logging to startBackup, listRunningJobs, and getJobStatus methods in rclone-api.service.ts to capture API responses and job metadata" + }, + { + "status": "complete", + "description": "Add logging to job filtering logic", + "details": "Add logging to the backup jobs query resolver to understand how jobs are being classified and filtered", + "rest": true + }, + { + "status": "complete", + "description": "Fix job group classification in backup queries resolver", + "details": "Ensure that jobs with groups 'backup/manual' and 'backup/{id}' are properly classified as non-system jobs" + }, + { + "status": "complete", + "description": "Verify job ID propagation from RClone responses", + "details": "Ensure that job IDs returned from RClone startBackup are properly captured and returned in GraphQL mutations" + }, + { + "status": "todo", + "description": "Fix job status data retrieval and formatting", + "details": "Ensure getJobStatus properly retrieves and formats all status data (progress, speed, ETA, etc.) for display in the frontend", + "rest": true + }, + { + "status": "todo", + "description": "Fix TypeScript errors in BackupOverview.vue", + "details": "Add missing 'type' field to BackupJob GraphQL type and fix any other type inconsistencies" + }, + { + "status": "todo", + "description": "Test job visibility and status data end-to-end", + "details": "Manually test triggering backup jobs via 'Run Now' button and verify they appear in jobs list with proper status data", + "rest": true + }, + { + "status": "todo", + "description": "Document system jobs investigation findings", + "details": "Investigate why there are many system jobs running and document findings for potential future work" + } + ] +} \ No newline at end of file diff --git a/.bivvy/x7K9-climb.md b/.bivvy/x7K9-climb.md new file mode 100644 index 000000000..1347861ec --- /dev/null +++ b/.bivvy/x7K9-climb.md @@ -0,0 +1,184 @@ +**STARTFILE x7K9-climb.md** + +
+ x7K9 + feature + Enhanced Backup Job Management System with disable/enable controls, manual triggering, and real-time progress monitoring +
+ No new external dependencies expected - leveraging existing GraphQL subscriptions infrastructure + None - building on existing backup system architecture + + - web/components/Backup/BackupJobConfig.vue (main UI component) + - web/components/Backup/backup-jobs.query.ts (GraphQL queries/mutations) + - api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts (GraphQL resolver) + - api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts (business logic) + - api/src/unraid-api/graph/resolvers/backup/backup.model.ts (GraphQL schema types) + + + ## Feature Overview + Enhance the existing backup job management system to provide better control and monitoring capabilities for users managing their backup operations. + + ## Purpose Statement + Users need granular control over their backup jobs with the ability to enable/disable individual jobs, manually trigger scheduled jobs on-demand, and monitor real-time progress of running backup operations. + + ## Problem Being Solved + - Users cannot easily disable/enable individual backup jobs without deleting them + - No way to manually trigger a scheduled backup job outside its schedule + - No real-time visibility into backup job progress once initiated + - Limited feedback on current backup operation status + + ## Success Metrics + - Users can toggle backup jobs on/off without losing configuration + - Users can manually trigger any configured backup job + - Real-time progress updates for active backup operations + - Improved user experience with immediate feedback + + ## Functional Requirements + + ### Job Control + - Toggle individual backup jobs enabled/disabled state + - Manual trigger functionality for any configured backup job + - Preserve all job configuration when disabling + - Visual indicators for job state (enabled/disabled/running) + + ### Progress Monitoring + - Real-time subscription for backup job progress + - Display progress percentage, speed, ETA, and transferred data + - Show currently running jobs in the UI + - Update job status in real-time without page refresh + + ### UI Enhancements + - Add enable/disable toggle controls to job cards + - Add "Run Now" button for manual triggering + - Progress indicators and status updates + - Better visual feedback for job states + + ## Technical Requirements + + ### GraphQL API + - Add mutation for enabling/disabling backup job configs + - Add mutation for manually triggering backup jobs by config ID + - Add subscription for real-time backup job progress updates + - Extend existing BackupJob type with progress fields + + ### Backend Services + - Enhance BackupConfigService with enable/disable functionality + - Add manual trigger capability that uses existing job configs + - Implement subscription resolver for real-time updates + - Ensure proper error handling and status reporting + + ### Frontend Implementation + - Add toggle controls to BackupJobConfig.vue + - Implement manual trigger buttons + - Subscribe to progress updates and display in UI + - Handle loading states and error conditions + + ## User Flow + + ### Disabling a Job + 1. User views backup job list + 2. User clicks toggle to disable a job + 3. Job status updates immediately + 4. Scheduled execution stops, configuration preserved + + ### Manual Triggering + 1. User clicks "Run Now" on any configured job + 2. System validates job configuration + 3. Backup initiates immediately + 4. User sees real-time progress updates + + ### Progress Monitoring + 1. User initiates backup (scheduled or manual) + 2. 
Progress subscription automatically activates + 3. Real-time updates show in UI + 4. Completion status updates when job finishes + + ## API Specifications + + ### New Mutations (Nested Pattern) + Following the established pattern from ArrayMutations, create BackupMutations: + ```graphql + type BackupMutations { + toggleJobConfig(id: String!, enabled: Boolean!): BackupJobConfig + triggerJob(configId: String!): BackupStatus + } + ``` + + ### Implementation Structure + - Create `BackupMutationsResolver` class similar to `ArrayMutationsResolver` + - Use `@ResolveField()` decorators instead of `@Mutation()` + - Add appropriate `@UsePermissions()` decorators + - Group all backup-related mutations under `BackupMutations` type + + ### New Subscription + ```graphql + backupJobProgress(jobId: String): BackupJob + ``` + + ### Enhanced Types + - Extend BackupJob with progress percentage + - Add jobConfigId reference to running jobs + - Include more detailed status information + + ### Frontend GraphQL Usage + ```graphql + mutation ToggleBackupJob($id: String!, $enabled: Boolean!) { + backup { + toggleJobConfig(id: $id, enabled: $enabled) { + id + enabled + updatedAt + } + } + } + + mutation TriggerBackupJob($configId: String!) { + backup { + triggerJob(configId: $configId) { + status + jobId + } + } + } + ``` + + ## Implementation Considerations + + ### Real-time Updates + - Use existing GraphQL subscription infrastructure + - Efficient polling of rclone API for progress data + - Proper cleanup of subscriptions when jobs complete + + ### State Management + - Update job configs atomically + - Handle concurrent operations gracefully + - Maintain consistency between scheduled and manual executions + + ### Error Handling + - Validate job configs before manual triggering + - Graceful degradation if progress updates fail + - Clear error messages for failed operations + + ## Testing Approach + + ### Test Cases + - Toggle job enabled/disabled state + - Manual trigger of backup jobs + - Real-time progress subscription functionality + - Error handling for invalid operations + - Concurrent job execution scenarios + + ### Acceptance Criteria + - Jobs can be disabled/enabled without data loss + - Manual triggers work for all valid job configurations + - Progress updates are accurate and timely + - UI responds appropriately to all state changes + - No memory leaks from subscription management + + ## Future Considerations + - Job scheduling modification (change cron without recreate) + - Backup job templates and bulk operations + - Advanced progress details (file-level progress) + - Job history and logging improvements +
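
## Appendix: Nested Mutation Resolver Sketch

A minimal sketch of the nested-mutation pattern described under API Specifications, mirroring the existing `ArrayMutationsResolver`. It assumes the `BackupMutations` type and `BackupConfigService` from this codebase; the decorator arguments shown are illustrative rather than final.

```typescript
import { Args, ResolveField, Resolver } from '@nestjs/graphql';

import {
    AuthActionVerb,
    AuthPossession,
    UsePermissions,
} from '@app/unraid-api/graph/directives/use-permissions.directive.js';
import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
import { BackupJobConfig } from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
import { Resource } from '@app/unraid-api/graph/resolvers/base.model.js';
import { BackupMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js';

@Resolver(() => BackupMutations)
export class BackupMutationsResolver {
    constructor(private readonly backupConfigService: BackupConfigService) {}

    // Resolves as a field on BackupMutations, so clients call
    // mutation { backup { toggleJobConfig(id: "...", enabled: false) { ... } } }
    @ResolveField(() => BackupJobConfig, { nullable: true })
    @UsePermissions({
        action: AuthActionVerb.UPDATE,
        resource: Resource.BACKUP,
        possession: AuthPossession.ANY,
    })
    async toggleJobConfig(
        @Args('id') id: string,
        @Args('enabled') enabled: boolean
    ): Promise<BackupJobConfig | null> {
        // Only the enabled flag changes; the rest of the config is preserved.
        return this.backupConfigService.updateBackupJobConfig(id, { enabled });
    }
}
```

Because the field resolves on `BackupMutations` rather than on the root `Mutation` type, clients address it as `mutation { backup { toggleJobConfig(...) } }`, matching the frontend usage shown above.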
+**ENDFILE** \ No newline at end of file diff --git a/.bivvy/x7K9-moves.json b/.bivvy/x7K9-moves.json new file mode 100644 index 000000000..bf6a60f34 --- /dev/null +++ b/.bivvy/x7K9-moves.json @@ -0,0 +1,63 @@ +{ + "Climb": "x7K9", + "moves": [ + { + "status": "done", + "description": "Create BackupMutations GraphQL type and resolver structure", + "details": "Add BackupMutations type to backup.model.ts, create backup-mutations.resolver.ts file, and move existing mutations (createBackupJobConfig, updateBackupJobConfig, deleteBackupJobConfig, initiateBackup) from BackupResolver to the new BackupMutationsResolver following the ArrayMutationsResolver pattern" + }, + { + "status": "done", + "description": "Implement toggleJobConfig mutation", + "details": "Add toggleJobConfig resolver method with proper permissions and update BackupConfigService to handle enable/disable functionality" + }, + { + "status": "done", + "description": "Implement triggerJob mutation", + "details": "Add triggerJob resolver method that manually triggers a backup job using existing config, with validation and error handling" + }, + { + "status": "done", + "description": "Add backupJobProgress subscription", + "details": "Create GraphQL subscription resolver for real-time backup job progress updates using existing rclone API polling", + "rest": true + }, + { + "status": "done", + "description": "Enhance BackupJob type with progress fields", + "details": "Add progress percentage, configId reference, and detailed status fields to BackupJob model" + }, + { + "status": "done", + "description": "Update frontend GraphQL queries and mutations", + "details": "Add new mutations and subscription to backup-jobs.query.ts following the nested mutation pattern" + }, + { + "status": "done", + "description": "Add toggle controls to BackupJobConfig.vue", + "details": "Add enable/disable toggle switches to each job card with proper state management and error handling" + }, + { + "status": "done", + "description": "Add manual trigger buttons to BackupJobConfig.vue", + "details": "Add 'Run Now' buttons with loading states and trigger the new mutation", + "rest": true + }, + { + "status": "done", + "description": "Implement progress monitoring in the UI", + "details": "Subscribe to backup job progress and display real-time updates in the job cards with progress bars and status" + }, + { + "status": "done", + "description": "Add visual indicators for job states", + "details": "Enhance job cards with better status indicators for enabled/disabled/running states and improve overall UX" + }, + { + "status": "todo", + "description": "Test integration and error handling", + "details": "Test all functionality including edge cases, error scenarios, and subscription cleanup", + "rest": true + } + ] +} \ No newline at end of file diff --git a/.cursor/rules/bivvy.mdc b/.cursor/rules/bivvy.mdc new file mode 100644 index 000000000..375c1ab3b --- /dev/null +++ b/.cursor/rules/bivvy.mdc @@ -0,0 +1,181 @@ +--- +description: Creating, working on, or closing a pitch(feature, bug, task, or exploration). +globs: +alwaysApply: false +--- +## Description +When a user requests to start a new Climb(feature, bug, task, or exploration), you will start a 2-phase process: Step 1 is to define a Product Requirements Document (PRD) via conversation with the user; Step 2 is to build a task list (a.k.a. "Moves") from that PRD. + +Once alerted, you will then begin working on the Climb, move-by-move, soliciting user feedback and approval. 
Moves can have "rest: true" on them, at which point you WILL ALWAYS stop after completing that move to check with the user on status. They can also have the status "skip" on them, in which case you will skip it and ASK the user at the end if they want to return to it. Once you reach the end of the Moves list and they've opted out of skipping, you will mark the Climb as complete in both the PRD and the Moves list, and move both files to the .bivvy/complete/ directory. + +IMPORTANT RULES: +1. YOU MUST STOP AND GET USER APPROVAL after: + - Creating the initial PRD draft + - Making any significant changes to the PRD + - Completing any move marked with "rest: true" +2. NEVER work on tasks marked as "skip" unless explicitly requested by the user +3. NEVER work ahead of the current move in the task list +4. ALWAYS follow the moves in order, one at a time + +## File locations +- Active PRDs are found in .bivvy/[id]-climb.md (PRD) and .bivvy/[id]-moves.json (i.e. the task list). +- Completed PRDs are moved to .bivvy/complete/[id]-climb.md and .bivvy/complete/[id]-moves.json +- The [id] is a 4-character string where each character can be [A-Za-z0-9], e.g. "02b7" or "xK4p" (enforce randomness here, also check that the id doesn't already exist in the .bivvy/complete/ directory) + +## Climb +IMPORTANT: When collecting information, we need to know if this is a feature, bug, task, or exploration. The top of the PRD should always be: +**STARTFILE [id]-climb.md** + +
+ [id] + [feature|bug|task|exploration] + (description) + (Make sure to ask the user if there will be any new dependencies) + (You should think through this carefully) + (Please do an initial grep based on the information you gather / ask the user for relevant files that might not be obvious) + (See below, there is a lot that could go here) + +**ENDFILE** + +Note: no tasks / moves...just everything needed to carry them out + +The PRD will differ with every Climb, but here are some guidelines: +Key Components to Include +Feature Overview + +Feature Name and ID: Clear, unique identifier for the feature +Purpose Statement: Concise explanation of what the feature is and why it's valuable +Problem Being Solved: Specific user pain points or business needs addressed +Success Metrics: Measurable outcomes that indicate feature success + +Requirements + +Functional Requirements: Specific capabilities the feature must provide +Technical Requirements: Performance, security, and reliability expectations +User Requirements: How the feature should work from the user's perspective +Constraints: Technical limitations, business rules, or regulatory considerations + +Design and Implementation + +User Flow: Step-by-step journey through the feature +Architecture Overview: How this feature integrates with existing systems +Dependent Components: Other systems or features this feature relies on +API Specifications: Required endpoints, payloads, and responses +Data Models: Key data structures and relationships + +Development Details + +Relevant Files: Specific files or components that will be affected +Implementation Considerations: Technical approach and potential challenges +Dependencies: External services, libraries, or APIs required +Security Considerations: Authentication, authorization, data protection needs + +Testing Approach + +Test Cases: Critical scenarios to validate +Acceptance Criteria: Conditions that must be met for feature approval +Edge Cases: Unusual or boundary conditions that need special handling +Performance Requirements: Specific benchmarks for speed and reliability + +Design Assets + +Mockups/Wireframes: Visual representations of the UI (references or links) +User Interface Guidelines: Styling, interaction patterns, and accessibility requirements +Content Guidelines: Copy samples, terminology standards, messaging approach + +Future Considerations + +Scalability Plans: How the feature should evolve as usage grows +Enhancement Ideas: Potential future improvements outside current scope +Known Limitations: Acknowledged constraints in the current implementation + +Formatting Best Practices + +Be Specific: Avoid vague language; use precise descriptions +Use Clear Structure: Organize with consistent headers and formatting +Include Examples: Provide concrete examples when explaining complex functionality +Prioritize Requirements: Indicate which requirements are essential vs. 
nice-to-have +Link to Resources: Reference existing documentation, designs, or research +Keep It Concise: Focus on what's necessary; avoid unnecessary detail +Use Visual Aids: Include diagrams, flowcharts, or mockups when helpful +Define Technical Terms: Include a glossary if specialized terminology is used + +What to Avoid + +Prescribing Implementation Details: Focus on what, not how (unless necessary) +Including Task Lists: Leave specific tasks for project management tools +Rigid Timelines: PRDs describe requirements, not project schedules +Vague Goals: Ensure all success metrics are measurable +Overspecification: Allow room for engineering creativity in solutions +Ignoring Constraints: Acknowledge technical and business limitations +Excessive Jargon: Write for clarity across different team roles + +By following this framework, you'll create feature PRDs that provide clear direction while maintaining flexibility for implementation approaches, ultimately leading to better features and more efficient development. + +## Moves +Once the Climb is generated and approved by the user, generate the Moves list. +You should carefully consider the ORDER in which these tasks need to be completed. +The size of every move should be something an AI agent can carry out in 2-3 code changes. +Make sure to add reasonable {rest: true} along the way. +Moves have the statuses: todo|climbing|skip|complete + +Here is a sample moves file: +**STARTFILE [id]-moves.json** +{ + "Climb": "abcd", + "moves": [ + { + "status": "complete", + "description": "install the dependencies", + "details": "install the deps listed as New Dependencies" + }, { + "status": "skip", + "description": "Write tests" + }, { + "status": "climbing", + "description": "Build the first part of the feature", + "rest": "true" + }, { + "status": "todo", + "description": "Build the last part of the feature", + "details": "After this, you'd ask the user if they want to return to write tests" + } + ] +} +**ENDFILE** + +## Running +Creating the PRD: +- THIS NEEDS TO BE ITERATIVE! +- If you need to, ask the user for clarifying questions before starting +- CRITICAL: YOU MUST STOP after your first draft of the PRD and wait for user approval +- CRITICAL: YOU MUST STOP after any significant changes to the PRD +- The PRD must be approved before moving on to creating the moves list + +Creating and Managing Moves: +- After PRD approval, create the moves list +- CRITICAL: YOU MUST STOP after creating the initial moves list for user approval +- Moves marked as "skip" MUST NOT be worked on unless explicitly requested by the user +- NEVER work ahead or complete tasks out of order +- Each move should be completed and approved before moving to the next one +- If a move depends on a skipped move, YOU MUST ASK the user if they want to return to the skipped move first + +Updating [id]-moves.json: +- After EVERY code approval, you should update the moves.json file +- Move through the moves array until you hit a todo item, then move on to it +- NEVER work on moves marked as "skip" +- NEVER work ahead of the current move +- It is okay to check with the user if they want to move forward, but trust the process +- Make sure to update the statuses within the moves.json file + +Keeping track of the Climb +- IMPORTANT: EVERY TIME YOU USE THIS RULE, THE LAST LINE OF YOUR OUTPUT SHOULD BE: "/|\ Bivvy Climb [id]" +- CRITICAL: Unless they are closing the Climb (see below) then do NOT keep track of the Climb. 
+ +## Closing (or canceling) a Climb +- If the user asks to close a Climb, ask them if they want to "delete" it or "complete" it +- They can also do either without asking to "close" first +- If they delete a Climb, delete both the Climb and moves file by id +- If they close a Climb, move it to complete +- CRITICAL: STOP ADDING THE Climb-TRACKING TEXT TO RESPONSES +- CRITICAL: STOP USING THIS RULE UNTIL A NEW Climb IS STARTED \ No newline at end of file diff --git a/api/.env.development b/api/.env.development index 949bdc97e..4c83c7ed8 100644 --- a/api/.env.development +++ b/api/.env.development @@ -15,6 +15,7 @@ PATHS_ACTIVATION_BASE=./dev/activation PATHS_PASSWD=./dev/passwd PATHS_RCLONE_SOCKET=./dev/rclone-socket PATHS_LOG_BASE=./dev/log # Where we store logs +PATHS_BACKUP_JOBS=./dev/api/backup ENVIRONMENT="development" NODE_ENV="development" PORT="3001" @@ -26,4 +27,4 @@ BYPASS_PERMISSION_CHECKS=false BYPASS_CORS_CHECKS=true CHOKIDAR_USEPOLLING=true LOG_TRANSPORT=console -LOG_LEVEL=trace +LOG_LEVEL=debug # Change to trace for extremely noisy logging diff --git a/api/dev/api/backup/backup-jobs.json b/api/dev/api/backup/backup-jobs.json new file mode 100644 index 000000000..20f78b868 --- /dev/null +++ b/api/dev/api/backup/backup-jobs.json @@ -0,0 +1,14 @@ +[ + { + "id": "1ad3f9a9-f438-43b2-bdd5-976c7ca4b2f5", + "name": "test", + "sourcePath": "/Users/elibosley/Downloads", + "remoteName": "FlashBackup", + "destinationPath": "backup", + "schedule": "0 2 * * *", + "enabled": true, + "rcloneOptions": {}, + "createdAt": "2025-05-24T12:19:29.150Z", + "updatedAt": "2025-05-24T12:19:29.150Z" + } +] \ No newline at end of file diff --git a/api/generated-schema.graphql b/api/generated-schema.graphql index bca01c37b..91ece288f 100644 --- a/api/generated-schema.graphql +++ b/api/generated-schema.graphql @@ -758,6 +758,7 @@ enum Resource { ACTIVATION_CODE API_KEY ARRAY + BACKUP CLOUD CONFIG CONNECT @@ -883,6 +884,63 @@ type VmMutations { reset(id: PrefixedID!): Boolean! } +"""Backup related mutations""" +type BackupMutations { + """Create a new backup job configuration""" + createBackupJobConfig(input: CreateBackupJobConfigInput!): BackupJobConfig! + + """Update a backup job configuration""" + updateBackupJobConfig(id: String!, input: UpdateBackupJobConfigInput!): BackupJobConfig + + """Delete a backup job configuration""" + deleteBackupJobConfig(id: String!): Boolean! + + """Initiates a backup using a configured remote.""" + initiateBackup(input: InitiateBackupInput!): BackupStatus! + + """Toggle a backup job configuration enabled/disabled""" + toggleJobConfig(id: String!): BackupJobConfig + + """Manually trigger a backup job using existing configuration""" + triggerJob(id: PrefixedID!): BackupStatus! +} + +input CreateBackupJobConfigInput { + name: String! + sourcePath: String! + remoteName: String! + destinationPath: String! + schedule: String! + enabled: Boolean! = true + rcloneOptions: JSON +} + +input UpdateBackupJobConfigInput { + name: String + sourcePath: String + remoteName: String + destinationPath: String + schedule: String + enabled: Boolean + rcloneOptions: JSON +} + +input InitiateBackupInput { + """The name of the remote configuration to use for the backup.""" + remoteName: String! + + """Source path to backup.""" + sourcePath: String! + + """Destination path on the remote.""" + destinationPath: String! + + """ + Additional options for the backup operation, such as --dry-run or --transfers. 
+ """ + options: JSON +} + """API Key related mutations""" type ApiKeyMutations { """Create an API key""" @@ -1036,7 +1094,7 @@ type RCloneRemote { type Backup implements Node { id: PrefixedID! - jobs: [BackupJob!]! + jobs(showSystemJobs: Boolean = false): [BackupJob!]! configs: [BackupJobConfig!]! """Get the status for the backup service""" @@ -1053,10 +1111,10 @@ type BackupStatus { type BackupJob { """Job ID""" - id: String! + id: PrefixedID! - """Job type (e.g., sync/copy)""" - type: String! + """RClone group for the job""" + group: String """Job status and statistics""" stats: JSON! @@ -1072,9 +1130,18 @@ type BackupJob { """Formatted ETA""" formattedEta: String + + """Progress percentage (0-100)""" + progressPercentage: Float + + """Configuration ID that triggered this job""" + configId: PrefixedID + + """Detailed status of the job""" + detailedStatus: String } -type BackupJobConfig { +type BackupJobConfig implements Node { id: PrefixedID! """Human-readable name for this backup job""" @@ -1112,7 +1179,7 @@ type BackupJobConfig { } type BackupJobConfigForm { - id: ID! + id: PrefixedID! dataSchema: JSON! uiSchema: JSON! } @@ -1674,7 +1741,7 @@ type Query { backupJobConfig(id: String!): BackupJobConfig """Get status of a specific backup job""" - backupJob(jobId: String!): BackupJob + backupJob(jobId: PrefixedID!): BackupJob """Get the JSON schema for backup job configuration form""" backupJobConfigForm(input: BackupJobConfigFormInput): BackupJobConfigForm! @@ -1719,21 +1786,10 @@ type Mutation { array: ArrayMutations! docker: DockerMutations! vm: VmMutations! + backup: BackupMutations! parityCheck: ParityCheckMutations! apiKey: ApiKeyMutations! rclone: RCloneMutations! - - """Create a new backup job configuration""" - createBackupJobConfig(input: CreateBackupJobConfigInput!): BackupJobConfig! - - """Update a backup job configuration""" - updateBackupJobConfig(id: String!, input: UpdateBackupJobConfigInput!): BackupJobConfig - - """Delete a backup job configuration""" - deleteBackupJobConfig(id: String!): Boolean! - - """Initiates a backup using a configured remote.""" - initiateBackup(input: InitiateBackupInput!): BackupStatus! updateApiSettings(input: ApiSettingsInput!): ConnectSettingsValues! connectSignIn(input: ConnectSignInInput!): Boolean! connectSignOut: Boolean! @@ -1751,42 +1807,6 @@ input NotificationData { link: String } -input CreateBackupJobConfigInput { - name: String! - sourcePath: String! - remoteName: String! - destinationPath: String! - schedule: String! - enabled: Boolean! = true - rcloneOptions: JSON -} - -input UpdateBackupJobConfigInput { - name: String - sourcePath: String - remoteName: String - destinationPath: String - schedule: String - enabled: Boolean - rcloneOptions: JSON -} - -input InitiateBackupInput { - """The name of the remote configuration to use for the backup.""" - remoteName: String! - - """Source path to backup.""" - sourcePath: String! - - """Destination path on the remote.""" - destinationPath: String! - - """ - Additional options for the backup operation, such as --dry-run or --transfers. - """ - options: JSON -} - input ApiSettingsInput { """ If true, the GraphQL sandbox will be enabled and available at /graphql. If false, the GraphQL sandbox will be disabled and only the production API will be available. @@ -1883,6 +1903,9 @@ type Subscription { serversSubscription: Server! parityHistorySubscription: ParityCheck! arraySubscription: UnraidArray! 
+ + """Subscribe to real-time backup job progress updates""" + backupJobProgress(jobId: PrefixedID!): BackupJob } """Available authentication action verbs""" diff --git a/api/src/core/pubsub.ts b/api/src/core/pubsub.ts index b1dbf54e3..937e132f9 100644 --- a/api/src/core/pubsub.ts +++ b/api/src/core/pubsub.ts @@ -8,6 +8,7 @@ eventEmitter.setMaxListeners(30); export enum PUBSUB_CHANNEL { ARRAY = 'ARRAY', + BACKUP_JOB_PROGRESS = 'BACKUP_JOB_PROGRESS', DASHBOARD = 'DASHBOARD', DISPLAY = 'DISPLAY', INFO = 'INFO', diff --git a/api/src/store/modules/paths.ts b/api/src/store/modules/paths.ts index 3a70d38e7..45ae09421 100644 --- a/api/src/store/modules/paths.ts +++ b/api/src/store/modules/paths.ts @@ -71,6 +71,7 @@ const initialState = { ), webGuiBase: '/usr/local/emhttp/webGui' as const, identConfig: resolvePath(process.env.PATHS_IDENT_CONFIG ?? ('/boot/config/ident.cfg' as const)), + backupBase: resolvePath(process.env.PATHS_BACKUP_JOBS ?? ('/boot/config/api/backup/' as const)), }; // Derive asset paths from base paths diff --git a/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts b/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts index 67859b133..1edf601c2 100644 --- a/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts +++ b/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts @@ -2,10 +2,12 @@ import { Injectable, Logger } from '@nestjs/common'; import { SchedulerRegistry } from '@nestjs/schedule'; import { existsSync } from 'fs'; import { readFile, writeFile } from 'fs/promises'; +import { join } from 'path'; import { CronJob } from 'cron'; import { v4 as uuidv4 } from 'uuid'; +import { getters } from '@app/store/index.js'; import { BackupJobConfig, CreateBackupJobConfigInput, @@ -31,13 +33,15 @@ interface BackupJobConfigData { @Injectable() export class BackupConfigService { private readonly logger = new Logger(BackupConfigService.name); - private readonly configPath = '/boot/config/backup-jobs.json'; + private readonly configPath: string; private configs: Map = new Map(); constructor( private readonly rcloneService: RCloneService, private readonly schedulerRegistry: SchedulerRegistry ) { + const paths = getters.paths(); + this.configPath = join(paths.backupBase, 'backup-jobs.json'); this.loadConfigs(); } @@ -112,15 +116,19 @@ export class BackupConfigService { const result = await this.rcloneService['rcloneApiService'].startBackup({ srcPath: config.sourcePath, dstPath: `${config.remoteName}:${config.destinationPath}`, - options: config.rcloneOptions, + async: true, + group: `backup/${config.id}`, + options: config.rcloneOptions || {}, }); + const jobId = result.jobId || result.jobid; + config.lastRunAt = new Date().toISOString(); - config.lastRunStatus = `Started with job ID: ${result.jobId}`; + config.lastRunStatus = `Started with job ID: ${jobId}`; this.configs.set(config.id, config); await this.saveConfigs(); - this.logger.log(`Backup job ${config.name} started successfully: ${result.jobId}`); + this.logger.log(`Backup job ${config.name} started successfully: ${jobId}`); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); config.lastRunAt = new Date().toISOString(); diff --git a/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts b/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts new file mode 100644 index 000000000..1303477e6 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts @@ -0,0 +1,177 @@ +import { Logger } from '@nestjs/common'; +import { Args, ResolveField, Resolver } from '@nestjs/graphql'; + +import { + AuthActionVerb, + AuthPossession, + UsePermissions, +} from '@app/unraid-api/graph/directives/use-permissions.directive.js'; +import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js'; +import { + BackupJobConfig, + BackupStatus, + CreateBackupJobConfigInput, + InitiateBackupInput, + UpdateBackupJobConfigInput, +} from '@app/unraid-api/graph/resolvers/backup/backup.model.js'; +import { Resource } from '@app/unraid-api/graph/resolvers/base.model.js'; +import { BackupMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js'; +import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js'; +import { PrefixedID } from '@app/unraid-api/graph/scalars/graphql-type-prefixed-id.js'; + +@Resolver(() => BackupMutations) +export class BackupMutationsResolver { + private readonly logger = new Logger(BackupMutationsResolver.name); + + constructor( + private readonly backupConfigService: BackupConfigService, + private readonly rcloneService: RCloneService + ) {} + + private async executeBackup( + sourcePath: string, + remoteName: string, + destinationPath: string, + options: Record = {}, + group: string + ): Promise { + try { + this.logger.log( + `Executing backup: ${sourcePath} -> ${remoteName}:${destinationPath} (group: ${group})` + ); + + const result = await this.rcloneService['rcloneApiService'].startBackup({ + srcPath: sourcePath, + dstPath: `${remoteName}:${destinationPath}`, + async: true, + group: group, + options: options, + }); + + this.logger.debug(`RClone startBackup result: ${JSON.stringify(result)}`); + + const jobId = result.jobid || result.jobId; + this.logger.log(`Backup job initiated successfully with ID: ${jobId}`); + + return { + status: 'Backup initiated successfully', + jobId: jobId, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error( + `Failed to execute backup: ${errorMessage}`, + error instanceof Error ? 
error.stack : undefined + ); + + return { + status: `Failed to initiate backup: ${errorMessage}`, + jobId: undefined, + }; + } + } + + @ResolveField(() => BackupJobConfig, { + description: 'Create a new backup job configuration', + }) + @UsePermissions({ + action: AuthActionVerb.CREATE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async createBackupJobConfig( + @Args('input') input: CreateBackupJobConfigInput + ): Promise { + return this.backupConfigService.createBackupJobConfig(input); + } + + @ResolveField(() => BackupJobConfig, { + description: 'Update a backup job configuration', + nullable: true, + }) + @UsePermissions({ + action: AuthActionVerb.UPDATE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async updateBackupJobConfig( + @Args('id') id: string, + @Args('input') input: UpdateBackupJobConfigInput + ): Promise { + return this.backupConfigService.updateBackupJobConfig(id, input); + } + + @ResolveField(() => Boolean, { + description: 'Delete a backup job configuration', + }) + @UsePermissions({ + action: AuthActionVerb.DELETE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async deleteBackupJobConfig(@Args('id') id: string): Promise { + return this.backupConfigService.deleteBackupJobConfig(id); + } + + @ResolveField(() => BackupStatus, { + description: 'Initiates a backup using a configured remote.', + }) + @UsePermissions({ + action: AuthActionVerb.CREATE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async initiateBackup(@Args('input') input: InitiateBackupInput): Promise { + return this.executeBackup( + input.sourcePath, + input.remoteName, + input.destinationPath, + input.options || {}, + 'backup/manual' + ); + } + + @ResolveField(() => BackupJobConfig, { + description: 'Toggle a backup job configuration enabled/disabled', + nullable: true, + }) + @UsePermissions({ + action: AuthActionVerb.UPDATE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async toggleJobConfig(@Args('id') id: string): Promise { + const existing = await this.backupConfigService.getBackupJobConfig(id); + if (!existing) return null; + + return this.backupConfigService.updateBackupJobConfig(id, { + enabled: !existing.enabled, + }); + } + + @ResolveField(() => BackupStatus, { + description: 'Manually trigger a backup job using existing configuration', + }) + @UsePermissions({ + action: AuthActionVerb.CREATE, + resource: Resource.BACKUP, + possession: AuthPossession.ANY, + }) + async triggerJob(@Args('id', { type: () => PrefixedID }) id: string): Promise { + const config = await this.backupConfigService.getBackupJobConfig(id); + if (!config) { + return { + status: 'Failed to trigger backup: Configuration not found', + jobId: undefined, + }; + } + + return this.executeBackup( + config.sourcePath, + config.remoteName, + config.destinationPath, + config.rcloneOptions || {}, + `backup/${id}` + ); + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.model.ts b/api/src/unraid-api/graph/resolvers/backup/backup.model.ts index baed83aed..a602ba7a7 100644 --- a/api/src/unraid-api/graph/resolvers/backup/backup.model.ts +++ b/api/src/unraid-api/graph/resolvers/backup/backup.model.ts @@ -1,18 +1,20 @@ -import { Field, ID, InputType, ObjectType } from '@nestjs/graphql'; +import { Field, InputType, ObjectType } from '@nestjs/graphql'; import { type Layout } from '@jsonforms/core'; import { IsBoolean, IsNotEmpty, IsObject, IsOptional, IsString, Matches } from 'class-validator'; import { 
GraphQLJSON } from 'graphql-scalars'; import { Node } from '@app/unraid-api/graph/resolvers/base.model.js'; +import { RCloneJob } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import { PrefixedID } from '@app/unraid-api/graph/scalars/graphql-type-prefixed-id.js'; import { DataSlice } from '@app/unraid-api/types/json-forms.js'; @ObjectType({ implements: () => Node, }) export class Backup extends Node { - @Field(() => [BackupJob]) - jobs!: BackupJob[]; + @Field(() => [RCloneJob]) + jobs!: RCloneJob[]; @Field(() => [BackupJobConfig]) configs!: BackupJobConfig[]; @@ -58,37 +60,15 @@ export class BackupStatus { jobId?: string; } -@ObjectType() -export class BackupJob { - @Field(() => String, { description: 'Job ID' }) - id!: string; - - @Field(() => String, { description: 'Job type (e.g., sync/copy)' }) - type!: string; - - @Field(() => GraphQLJSON, { description: 'Job status and statistics' }) - stats!: Record; - - @Field(() => String, { description: 'Formatted bytes transferred', nullable: true }) - formattedBytes?: string; - - @Field(() => String, { description: 'Formatted transfer speed', nullable: true }) - formattedSpeed?: string; - - @Field(() => String, { description: 'Formatted elapsed time', nullable: true }) - formattedElapsedTime?: string; - - @Field(() => String, { description: 'Formatted ETA', nullable: true }) - formattedEta?: string; -} - @ObjectType() export class RCloneWebGuiInfo { @Field() url!: string; } -@ObjectType() +@ObjectType({ + implements: () => Node, +}) export class BackupJobConfig extends Node { @Field(() => String, { description: 'Human-readable name for this backup job' }) name!: string; @@ -223,7 +203,7 @@ export class UpdateBackupJobConfigInput { @ObjectType() export class BackupJobConfigForm { - @Field(() => ID) + @Field(() => PrefixedID) id!: string; @Field(() => GraphQLJSON) diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.module.ts b/api/src/unraid-api/graph/resolvers/backup/backup.module.ts index 9738e8d6f..41cd2c0bb 100644 --- a/api/src/unraid-api/graph/resolvers/backup/backup.module.ts +++ b/api/src/unraid-api/graph/resolvers/backup/backup.module.ts @@ -2,13 +2,14 @@ import { Module } from '@nestjs/common'; import { ScheduleModule } from '@nestjs/schedule'; import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js'; +import { BackupMutationsResolver } from '@app/unraid-api/graph/resolvers/backup/backup-mutations.resolver.js'; import { BackupResolver } from '@app/unraid-api/graph/resolvers/backup/backup.resolver.js'; import { FormatService } from '@app/unraid-api/graph/resolvers/backup/format.service.js'; import { RCloneModule } from '@app/unraid-api/graph/resolvers/rclone/rclone.module.js'; @Module({ imports: [RCloneModule, ScheduleModule.forRoot()], - providers: [BackupResolver, BackupConfigService, FormatService], + providers: [BackupResolver, BackupMutationsResolver, BackupConfigService, FormatService], exports: [], }) export class BackupModule {} diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts b/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts index 58c95ff96..4417def97 100644 --- a/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts +++ b/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts @@ -1,6 +1,7 @@ import { Inject, Logger } from '@nestjs/common'; -import { Args, Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql'; +import { Args, Mutation, Query, ResolveField, Resolver, Subscription } from 
'@nestjs/graphql'; +import { pubsub } from '@app/core/pubsub.js'; import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js'; import { Backup, @@ -9,13 +10,12 @@ import { BackupJobConfigForm, BackupJobConfigFormInput, BackupStatus, - CreateBackupJobConfigInput, - InitiateBackupInput, - UpdateBackupJobConfigInput, } from '@app/unraid-api/graph/resolvers/backup/backup.model.js'; import { FormatService } from '@app/unraid-api/graph/resolvers/backup/format.service.js'; import { buildBackupJobConfigSchema } from '@app/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.js'; +import { RCloneJob } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js'; +import { PrefixedID } from '@app/unraid-api/graph/scalars/graphql-type-prefixed-id.js'; @Resolver(() => Backup) export class BackupResolver { @@ -41,8 +41,11 @@ export class BackupResolver { @ResolveField(() => [BackupJob], { description: 'Get all running backup jobs', }) - async jobs(): Promise { - return this.backupJobs(); + async jobs( + @Args('showSystemJobs', { type: () => Boolean, nullable: true, defaultValue: false }) + showSystemJobs?: boolean + ): Promise { + return this.backupJobs(showSystemJobs); } @ResolveField(() => [BackupJobConfig], { @@ -60,81 +63,19 @@ export class BackupResolver { return this.backupConfigService.getBackupJobConfig(id); } - @Mutation(() => BackupJobConfig, { - description: 'Create a new backup job configuration', - }) - async createBackupJobConfig( - @Args('input') input: CreateBackupJobConfigInput - ): Promise { - return this.backupConfigService.createBackupJobConfig(input); - } - - @Mutation(() => BackupJobConfig, { - description: 'Update a backup job configuration', - nullable: true, - }) - async updateBackupJobConfig( - @Args('id') id: string, - @Args('input') input: UpdateBackupJobConfigInput - ): Promise { - return this.backupConfigService.updateBackupJobConfig(id, input); - } - - @Mutation(() => Boolean, { - description: 'Delete a backup job configuration', - }) - async deleteBackupJobConfig(@Args('id') id: string): Promise { - return this.backupConfigService.deleteBackupJobConfig(id); - } - - private async backupJobs(): Promise { - try { - const jobs = await this.rcloneService['rcloneApiService'].listRunningJobs(); - return ( - jobs.jobids?.map((jobId: string, index: number) => { - const stats = jobs.stats?.[index] || {}; - return { - id: jobId, - type: 'backup', - stats, - formattedBytes: stats.bytes - ? this.formatService.formatBytes(stats.bytes) - : undefined, - formattedSpeed: stats.speed - ? this.formatService.formatBytes(stats.speed) - : undefined, - formattedElapsedTime: stats.elapsedTime - ? this.formatService.formatDuration(stats.elapsedTime) - : undefined, - formattedEta: stats.eta - ? this.formatService.formatDuration(stats.eta) - : undefined, - }; - }) || [] - ); - } catch (error) { - this.logger.error('Failed to fetch backup jobs:', error); - return []; - } - } - @Query(() => BackupJob, { description: 'Get status of a specific backup job', nullable: true, }) - async backupJob(@Args('jobId') jobId: string): Promise { + async backupJob( + @Args('jobId', { type: () => PrefixedID }) jobId: string + ): Promise { try { const status = await this.rcloneService['rcloneApiService'].getJobStatus({ jobId }); return { id: jobId, - type: status.group || 'backup', + group: status.group || '', stats: status, - formattedBytes: status.bytes ? 
this.formatService.formatBytes(status.bytes) : undefined, - formattedSpeed: status.speed ? this.formatService.formatBytes(status.speed) : undefined, - formattedElapsedTime: status.elapsedTime - ? this.formatService.formatDuration(status.elapsedTime) - : undefined, - formattedEta: status.eta ? this.formatService.formatDuration(status.eta) : undefined, }; } catch (error) { this.logger.error(`Failed to fetch backup job ${jobId}:`, error); @@ -142,32 +83,6 @@ export class BackupResolver { } } - @Mutation(() => BackupStatus, { - description: 'Initiates a backup using a configured remote.', - }) - async initiateBackup(@Args('input') input: InitiateBackupInput): Promise { - try { - const result = await this.rcloneService['rcloneApiService'].startBackup({ - srcPath: input.sourcePath, - dstPath: `${input.remoteName}:${input.destinationPath}`, - options: input.options, - }); - - return { - status: 'Backup initiated successfully', - jobId: result.jobid || result.jobId, - }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - this.logger.error('Failed to initiate backup:', error); - - return { - status: `Failed to initiate backup: ${errorMessage}`, - jobId: undefined, - }; - } - } - @ResolveField(() => BackupStatus, { description: 'Get the status for the backup service', }) @@ -198,4 +113,77 @@ export class BackupResolver { uiSchema, }; } + + @Subscription(() => BackupJob, { + description: 'Subscribe to real-time backup job progress updates', + nullable: true, + }) + async backupJobProgress(@Args('jobId', { type: () => PrefixedID }) jobId: string) { + return pubsub.asyncIterableIterator(`BACKUP_JOB_PROGRESS:${jobId}`); + } + + private async backupJobs(showSystemJobs: boolean = false): Promise { + try { + this.logger.debug(`backupJobs called with showSystemJobs: ${showSystemJobs}`); + + let jobs; + if (showSystemJobs) { + // Get all jobs when showing system jobs + jobs = await this.rcloneService['rcloneApiService'].getAllJobsWithStats(); + this.logger.debug(`All jobs with stats: ${JSON.stringify(jobs)}`); + } else { + // Get only backup jobs with enhanced stats when not showing system jobs + jobs = await this.rcloneService['rcloneApiService'].getBackupJobsWithStats(); + this.logger.debug(`Backup jobs with enhanced stats: ${JSON.stringify(jobs)}`); + } + + // Filter and map jobs + const allJobs = + jobs.jobids?.map((jobId: string | number, index: number) => { + const stats = jobs.stats?.[index] || {}; + const group = stats.group || ''; + + this.logger.debug( + `Processing job ${jobId}: group="${group}", stats keys: [${Object.keys(stats).join(', ')}]` + ); + + return { + id: String(jobId), + group: group, + stats, + }; + }) || []; + + this.logger.debug(`Mapped ${allJobs.length} jobs total`); + + // Log all job groups for analysis + const jobGroupSummary = allJobs.map((job) => ({ id: job.id, group: job.group })); + this.logger.debug(`All job groups: ${JSON.stringify(jobGroupSummary)}`); + + // Filter based on showSystemJobs flag + if (showSystemJobs) { + this.logger.debug(`Returning all ${allJobs.length} jobs (showSystemJobs=true)`); + return allJobs; + } else { + // When not showing system jobs, we already filtered to backup jobs in getBackupJobsWithStats + // But let's double-check the filtering for safety + const filteredJobs = allJobs.filter((job) => job.group.startsWith('backup/')); + this.logger.debug( + `Filtered to ${filteredJobs.length} backup jobs (group starts with 'backup/')` + ); + + const nonBackupJobs = allJobs.filter((job) => 
!job.group.startsWith('backup/')); + if (nonBackupJobs.length > 0) { + this.logger.debug( + `Excluded ${nonBackupJobs.length} non-backup jobs: ${JSON.stringify(nonBackupJobs.map((j) => ({ id: j.id, group: j.group })))}` + ); + } + + return filteredJobs; + } + } catch (error) { + this.logger.error('Failed to fetch backup jobs:', error); + return []; + } + } } diff --git a/api/src/unraid-api/graph/resolvers/base.model.ts b/api/src/unraid-api/graph/resolvers/base.model.ts index cad23100f..02be22a55 100644 --- a/api/src/unraid-api/graph/resolvers/base.model.ts +++ b/api/src/unraid-api/graph/resolvers/base.model.ts @@ -9,6 +9,7 @@ export enum Resource { ACTIVATION_CODE = 'ACTIVATION_CODE', API_KEY = 'API_KEY', ARRAY = 'ARRAY', + BACKUP = 'BACKUP', CLOUD = 'CLOUD', CONFIG = 'CONFIG', CONNECT = 'CONNECT', diff --git a/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts b/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts index 8beb1f7a8..9a9e8c7f6 100644 --- a/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts +++ b/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts @@ -17,6 +17,11 @@ export class DockerMutations {} @ObjectType() export class VmMutations {} +@ObjectType({ + description: 'Backup related mutations', +}) +export class BackupMutations {} + @ObjectType({ description: 'API Key related mutations', }) @@ -43,6 +48,9 @@ export class RootMutations { @Field(() => VmMutations, { description: 'VM related mutations' }) vm: VmMutations = new VmMutations(); + @Field(() => BackupMutations, { description: 'Backup related mutations' }) + backup: BackupMutations = new BackupMutations(); + @Field(() => ApiKeyMutations, { description: 'API Key related mutations' }) apiKey: ApiKeyMutations = new ApiKeyMutations(); diff --git a/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts b/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts index 42a9cb126..3bd6b69d3 100644 --- a/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts +++ b/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts @@ -3,6 +3,7 @@ import { Mutation, Resolver } from '@nestjs/graphql'; import { ApiKeyMutations, ArrayMutations, + BackupMutations, DockerMutations, ParityCheckMutations, RCloneMutations, @@ -27,6 +28,11 @@ export class RootMutationsResolver { return new VmMutations(); } + @Mutation(() => BackupMutations, { name: 'backup' }) + backup(): BackupMutations { + return new BackupMutations(); + } + @Mutation(() => ParityCheckMutations, { name: 'parityCheck' }) parityCheck(): ParityCheckMutations { return new ParityCheckMutations(); diff --git a/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html b/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html new file mode 100644 index 000000000..b966b3182 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html @@ -0,0 +1,2626 @@ + + + + + + + + + + + Remote Control / API + + + + + + + + + + + +
+
+

Remote controlling rclone with its API

+

If rclone is run with the --rc flag then it starts an HTTP server +which can be used to remote control rclone using its API.

+

You can either use the rc command to access the API +or use HTTP directly.

+

If you just want to run a remote control then see the rcd command.
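For example, a standalone remote control daemon with basic authentication could be started like this (the user name and password are illustrative):

rclone rcd --rc-addr=localhost:5572 --rc-user=admin --rc-pass=secret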

+

Supported parameters

+

--rc

+

Flag to start the HTTP server listening for remote requests.

+

--rc-addr=IP

+

IPaddress:Port or :Port to bind server to. (default "localhost:5572").

+

--rc-cert=KEY

+

SSL PEM key (concatenation of certificate and CA certificate).

+

--rc-client-ca=PATH

+

Client certificate authority to verify clients with.

+

--rc-htpasswd=PATH

+

htpasswd file - if not provided no authentication is done.

+

--rc-key=PATH

+

TLS PEM private key file.

+

--rc-max-header-bytes=VALUE

+

Maximum size of request header (default 4096).

+

--rc-min-tls-version=VALUE

+

The minimum TLS version that is acceptable. Valid values are "tls1.0", +"tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

+

--rc-user=VALUE

+

User name for authentication.

+

--rc-pass=VALUE

+

Password for authentication.

+

--rc-realm=VALUE

+

Realm for authentication (default "rclone").

+

--rc-server-read-timeout=DURATION

+

Timeout for server reading data (default 1h0m0s).

+

--rc-server-write-timeout=DURATION

+

Timeout for server writing data (default 1h0m0s).

+

--rc-serve

+

Enable the serving of remote objects via the HTTP interface. This +means objects will be accessible at http://127.0.0.1:5572/ by default, +so you can browse to http://127.0.0.1:5572/ or http://127.0.0.1:5572/* +to see a listing of the remotes. Objects may be requested from +remotes using this syntax http://127.0.0.1:5572/[remote:path]/path/to/object

+

Default Off.

+

--rc-serve-no-modtime

+

Set this flag to skip reading the modification time (can speed things up).

+

Default Off.

+

--rc-files /path/to/directory

+

Path to local files to serve on the HTTP server.

+

If this is set then rclone will serve the files in that directory. It +will also open the root in the web browser if specified. This is for +implementing browser based GUIs for rclone functions.

+

If --rc-user or --rc-pass is set then the URL that is opened will +have the authorization in the URL in the http://user:pass@localhost/ +style.

+

Default Off.

+

--rc-enable-metrics

+

Enable OpenMetrics/Prometheus compatible endpoint at /metrics. +If more control over the metrics is desired (for example running it on a different port or with different auth) then endpoint can be enabled with the --metrics-* flags instead.

+

Default Off.

+

--rc-web-gui

+

Set this flag to serve the default web gui on the same port as rclone.

+

Default Off.

+

--rc-allow-origin

+

Set the allowed Access-Control-Allow-Origin for rc requests.

+

Can be used with --rc-web-gui if rclone is running on a different IP than the web-gui.

+

Default is IP address on which rc is running.

+

--rc-web-fetch-url

+

Set the URL to fetch the rclone-web-gui files from.

+

Default https://api.github.com/repos/rclone/rclone-webui-react/releases/latest.

+

--rc-web-gui-update

+

Set this flag to check and update rclone-webui-react from the rc-web-fetch-url.

+

Default Off.

+

--rc-web-gui-force-update

+

Set this flag to force update rclone-webui-react from the rc-web-fetch-url.

+

Default Off.

+

--rc-web-gui-no-open-browser

+

Set this flag to disable opening browser automatically when using web-gui.

+

Default Off.

+

--rc-job-expire-duration=DURATION

+

Expire finished async jobs older than DURATION (default 60s).

+

--rc-job-expire-interval=DURATION

+

Interval duration to check for expired async jobs (default 10s).

+

--rc-no-auth

+

By default rclone will require authorisation to have been set up on the rc interface in order to use any methods which access any rclone remotes. E.g. operations/list is denied as it involves creating a remote, as is sync/copy.

+

If this is set then no authorisation will be required on the server to +use these methods. The alternative is to use --rc-user and +--rc-pass and use these credentials in the request.

+

Default Off.

+

--rc-baseurl

+

Prefix for URLs.

+

Default is root

+

--rc-template

+

User-specified template.

+

Accessing the remote control via the rclone rc command

+

Rclone itself implements the remote control protocol in its rclone rc command.

+

You can use it like this:

+
$ rclone rc rc/noop param1=one param2=two
+{
+	"param1": "one",
+	"param2": "two"
+}
+

If the remote is running on a different URL than the default +http://localhost:5572/, use the --url option to specify it:

+
$ rclone rc --url http://some.remote:1234/ rc/noop
+

Or, if the remote is listening on a Unix socket, use the --unix-socket option +instead:

+
$ rclone rc --unix-socket /tmp/rclone.sock rc/noop
+

Run rclone rc on its own, without any commands, to see the help for the +installed remote control commands. Note that this also needs to connect to the +remote server.

+

JSON input

+

rclone rc also supports a --json flag which can be used to send +more complicated input parameters.

+
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop
+{
+	"p1": [
+		1,
+		"2",
+		null,
+		4
+	],
+	"p2": {
+		"a": 1,
+		"b": 2
+	}
+}
+

If the parameter being passed is an object then it can be passed as a +JSON string rather than using the --json flag which simplifies the +command line.

+
rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}'
+

Rather than

+
rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
+

Special parameters

+

The rc interface supports some special parameters which apply to +all commands. These start with _ to show they are different.

+

Running asynchronous jobs with _async = true

+

Each rc call is classified as a job and it is assigned its own id. By default jobs are executed synchronously, i.e. they run immediately as they are created.

+

If _async has a true value when supplied to an rc call then it will +return immediately with a job id and the task will be run in the +background. The job/status call can be used to get information of +the background job. The job can be queried for up to 1 minute after +it has finished.

+

It is recommended that potentially long running jobs, e.g. sync/sync, +sync/copy, sync/move, operations/purge are run with the _async +flag to avoid any potential problems with the HTTP request and +response timing out.

+

Starting a job with the _async flag:

+
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop
+{
+	"jobid": 2
+}
+

Query the status to see if the job has finished. For more information +on the meaning of these return parameters see the job/status call.

+
$ rclone rc --json '{ "jobid":2 }' job/status
+{
+	"duration": 0.000124163,
+	"endTime": "2018-10-27T11:38:07.911245881+01:00",
+	"error": "",
+	"finished": true,
+	"id": 2,
+	"output": {
+		"_async": true,
+		"p1": [
+			1,
+			"2",
+			null,
+			4
+		],
+		"p2": {
+			"a": 1,
+			"b": 2
+		}
+	},
+	"startTime": "2018-10-27T11:38:07.911121728+01:00",
+	"success": true
+}
+

job/list can be used to show the running or recently completed jobs

+
$ rclone rc job/list
+{
+	"jobids": [
+		2
+	]
+}
+

Setting config flags with _config

+

If you wish to set config (the equivalent of the global flags) for the +duration of an rc call only then pass in the _config parameter.

+

This should be in the same format as the config key returned by +options/get.

+

For example, if you wished to run a sync with the --checksum +parameter, you would pass this parameter in your JSON blob.

+
"_config":{"CheckSum": true}
+
+

If using rclone rc this could be passed as

+
rclone rc sync/sync ... _config='{"CheckSum": true}'
+
+

Any config parameters you don't set will inherit the global defaults +which were set with command line flags or environment variables.

+

Note that it is possible to set some values as strings or integers - +see data types for more info. Here is an example +setting the equivalent of --buffer-size in string or integer format.

+
"_config":{"BufferSize": "42M"}
+"_config":{"BufferSize": 44040192}
+
+

If you wish to check the _config assignment has worked properly then +calling options/local will show what the value got set to.
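For example, a quick check that a _config override is being applied (the value is illustrative):

rclone rc options/local _config='{"CheckSum": true}'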

+

Setting filter flags with _filter

+

If you wish to set filters for the duration of an rc call only then +pass in the _filter parameter.

+

This should be in the same format as the filter key returned by +options/get.

+

For example, if you wished to run a sync with these flags

+
--max-size 1M --max-age 42s --include "a" --include "b"
+
+

you would pass this parameter in your JSON blob.

+
"_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
+
+

If using rclone rc this could be passed as

+
rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
+
+

Any filter parameters you don't set will inherit the global defaults +which were set with command line flags or environment variables.

+

Note that it is possible to set some values as strings or integers - +see data types for more info. Here is an example +setting the equivalent of --buffer-size in string or integer format.

+
"_filter":{"MinSize": "42M"}
+"_filter":{"MinSize": 44040192}
+
+

If you wish to check the _filter assignment has worked properly then +calling options/local will show what the value got set to.

+

Assigning operations to groups with _group = value

+

Each rc call has its own stats group for tracking its metrics. By default grouping is done by the composite group name from the prefix job/ and the id of the job, e.g. job/1.

+

If _group has a value then stats for that request will be grouped under that value. This allows the caller to group stats under their own name.

+

Stats for a specific group can be accessed by passing group to core/stats:

+
$ rclone rc --json '{ "group": "job/1" }' core/stats
+{
+	"speed": 12345
+	...
+}
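For example, a long running copy can be started asynchronously under a caller-chosen group, and its stats read back with core/stats (the paths and the group name backup/manual are illustrative):

rclone rc sync/copy srcFs=/src dstFs=remote:dst _async=true _group=backup/manual
rclone rc core/stats group=backup/manual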
+

Data types

+

When the API returns types, these will mostly be straightforward integer, string or boolean types.

+

However some of the types returned by the options/get call, and taken by the options/set call as well as the vfsOpt, mountOpt and _config parameters, are more complex:

+
    +
  • Duration - these are returned as an integer duration in nanoseconds. They may be set as an integer, or they may be set with a time string, eg "5s". See the options section for more info.
  • +
  • Size - these are returned as an integer number of bytes. They may +be set as an integer or they may be set with a size suffix string, +eg "10M". See the options section for more info.
  • +
  • Enumerated type (such as CutoffMode, DumpFlags, LogLevel, VfsCacheMode) - these will be returned as an integer and may be set as an integer but more conveniently they can be set as a string, eg "HARD" for CutoffMode or "DEBUG" for LogLevel.
  • +
  • BandwidthSpec - this will be set and returned as a string, eg +"1M".
  • +
+
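As a sketch, assuming the config field for --timeout is named Timeout, the same Duration could be supplied either way (both lines set five seconds):

"_config":{"Timeout": "5s"}
"_config":{"Timeout": 5000000000}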

Option blocks

+

The calls options/info (for the main config) and +config/providers (for the backend config) may be +used to get information on the rclone configuration options. This can +be used to build user interfaces for displaying and setting any rclone +option.

+

These consist of arrays of Option blocks. These have the following +format. Each block describes a single option.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeOptionalDescription
NamestringNname of the option in snake_case
FieldNamestringNname of the field used in the rc - if blank use Name
HelpstringNhelp, started with a single sentence on a single line
GroupsstringYgroups this option belongs to - comma separated string for options classification
ProviderstringYset to filter on provider
DefaultanyNdefault value, if set (and not to nil or "") then Required does nothing
ValueanyNvalue to be set by flags
ExamplesExamplesYpredefined values that can be selected from list (multiple-choice option)
ShortOptstringYthe short command line option for this
HideVisibilityNif non zero, this option is hidden from the configurator or the command line
RequiredboolNthis option is required, meaning value cannot be empty unless there is a default
IsPasswordboolNset if the option is a password
NoPrefixboolNset if the option for this should not use the backend prefix
AdvancedboolNset if this is an advanced config option
ExclusiveboolNset if the answer can only be one of the examples (empty string allowed unless Required or Default is set)
SensitiveboolNset if this option should be redacted when using rclone config redacted
+

An example of this might be the --log-level flag. Note that the +Name of the option becomes the command line flag with _ replaced +with -.

+
{
+    "Advanced": false,
+    "Default": 5,
+    "DefaultStr": "NOTICE",
+    "Examples": [
+        {
+            "Help": "",
+            "Value": "EMERGENCY"
+        },
+        {
+            "Help": "",
+            "Value": "ALERT"
+        },
+        ...
+    ],
+    "Exclusive": true,
+    "FieldName": "LogLevel",
+    "Groups": "Logging",
+    "Help": "Log level DEBUG|INFO|NOTICE|ERROR",
+    "Hide": 0,
+    "IsPassword": false,
+    "Name": "log_level",
+    "NoPrefix": true,
+    "Required": true,
+    "Sensitive": false,
+    "Type": "LogLevel",
+    "Value": null,
+    "ValueStr": "NOTICE"
+},
+

Note that the Help may be multiple lines separated by \n. The +first line will always be a short sentence and this is the sentence +shown when running rclone help flags.

+

Specifying remotes to work on

+

Remotes are specified with the fs=, srcFs=, dstFs= +parameters depending on the command being used.

+

The parameters can be a string as per the rest of rclone, eg +s3:bucket/path or :sftp:/my/dir. They can also be specified as +JSON blobs.

+

If specifying a JSON blob it should be an object mapping strings to strings. These values will be used to configure the remote. There are 3 special values which may be set:

+
    +
  • type - set to type to specify a remote called :type:
  • +
  • _name - set to name to specify a remote called name:
  • +
  • _root - sets the root of the remote - may be empty
  • +
+

One of _name or type should normally be set. If the local +backend is desired then type should be set to local. If _root +isn't specified then it defaults to the root of the remote.

+

For example this JSON is equivalent to remote:/tmp

+
{
+    "_name": "remote",
+    "_root": "/tmp"
+}
+

And this is equivalent to :sftp,host='example.com':/tmp

+
{
+    "type": "sftp",
+    "host": "example.com",
+    "_root": "/tmp"
+}
+

And this is equivalent to /tmp/dir

+
{
+    "type": "local",
+    "_root": "/tmp/dir"
+}
+
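A remote given as a JSON blob can be used wherever an fs parameter is expected, for example (paths illustrative):

rclone rc operations/list fs='{"type": "local", "_root": "/tmp"}' remote=""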

Supported commands

+ +

backend/command: Runs a backend command.

+

This takes the following parameters:

+
    +
  • command - a string with the command name
  • +
  • fs - a remote name string e.g. "drive:"
  • +
  • arg - a list of arguments for the backend command
  • +
  • opt - a map of string to string of options
  • +
+

Returns:

+
    +
  • result - result from the backend command
  • +
+

Example:

+
rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2
+
+

Returns

+
{
+	"result": {
+		"arg": [
+			"path1",
+			"path2"
+		],
+		"name": "noop",
+		"opt": {
+			"blue": "",
+			"echo": "yes"
+		}
+	}
+}
+

Note that this is the direct equivalent of using this "backend" +command:

+
rclone backend noop . -o echo=yes -o blue path1 path2
+
+

Note that arguments must be preceded by the "-a" flag

+

See the backend command for more information.

+

Authentication is required for this call.

+

cache/expire: Purge a remote from cache

+

Purge a remote from the cache backend. Supports either a directory or a file. +Params:

+
    +
  • remote = path to remote (required)
  • +
  • withData = true/false to delete cached data (chunks) as well (optional)
  • +
+

Eg

+
rclone rc cache/expire remote=path/to/sub/folder/
+rclone rc cache/expire remote=/ withData=true
+
+

cache/fetch: Fetch file chunks

+

Ensure the specified file chunks are cached on disk.

+

The chunks= parameter specifies the file chunks to check. +It takes a comma separated list of array slice indices. +The slice indices are similar to Python slices: start[:end]

+

start is the 0 based chunk number from the beginning of the file +to fetch inclusive. end is 0 based chunk number from the beginning +of the file to fetch exclusive. +Both values can be negative, in which case they count from the back +of the file. The value "-5:" represents the last 5 chunks of a file.

+

Some valid examples are: +":5,-5:" -> the first and last five chunks +"0,-2" -> the first and the second last chunk +"0:10" -> the first ten chunks

+

Any parameter with a key that starts with "file" can be used to +specify files to fetch, e.g.

+
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
+
+

File names will automatically be encrypted when a crypt remote is used on top of the cache.

+

cache/stats: Get cache stats

+

Show statistics for the cache remote.

+

config/create: create the config for a remote.

+

This takes the following parameters:

+
    +
  • name - name of remote
  • +
  • parameters - a map of { "key": "value" } pairs
  • +
  • type - type of the new remote
  • +
  • opt - a dictionary of options to control the configuration +
      +
    • obscure - declare passwords are plain and need obscuring
    • +
    • noObscure - declare passwords are already obscured and don't need obscuring
    • +
    • nonInteractive - don't interact with a user, return questions
    • +
    • continue - continue the config process with an answer
    • +
    • all - ask all the config questions not just the post config ones
    • +
    • state - state to restart with - used with continue
    • +
    • result - result to restart with - used with continue
    • +
    +
  • +
+

See the config create command for more information on the above.
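For example (the remote name and parameters are illustrative):

rclone rc config/create name=mys3 type=s3 parameters='{"provider": "AWS", "env_auth": true}'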

+

Authentication is required for this call.

+

config/delete: Delete a remote in the config file.

+

Parameters:

+
    +
  • name - name of remote to delete
  • +
+

See the config delete command for more information on the above.

+

Authentication is required for this call.

+

config/dump: Dumps the config file.

+

Returns a JSON object:

+
    +
  • key: value
  • +
+

Where keys are remote names and values are the config parameters.

+

See the config dump command for more information on the above.

+

Authentication is required for this call.

+

config/get: Get a remote in the config file.

+

Parameters:

+
    +
  • name - name of remote to get
  • +
+

See the config dump command for more information on the above.

+

Authentication is required for this call.

+

config/listremotes: Lists the remotes in the config file and defined in environment variables.

+

Returns

+
    +
  • remotes - array of remote names
  • +
+

See the listremotes command for more information on the above.

+

Authentication is required for this call.

+

config/password: password the config for a remote.

+

This takes the following parameters:

+
    +
  • name - name of remote
  • +
  • parameters - a map of { "key": "value" } pairs
  • +
+

See the config password command for more information on the above.

+

Authentication is required for this call.

+

config/paths: Reads the config file path and other important paths.

+

Returns a JSON object with the following keys:

+
    +
  • config: path to config file
  • +
  • cache: path to root of cache directory
  • +
  • temp: path to root of temporary directory
  • +
+

Eg

+
{
+    "cache": "/home/USER/.cache/rclone",
+    "config": "/home/USER/.rclone.conf",
+    "temp": "/tmp"
+}
+
+

See the config paths command for more information on the above.

+

Authentication is required for this call.

+

config/providers: Shows how providers are configured in the config file.

+

Returns a JSON object:

+
    +
  • providers - array of objects
  • +
+

See the config providers command +for more information on the above.

+

Note that the Options blocks are in the same format as returned by +"options/info". They are described in the +option blocks section.

+

Authentication is required for this call.

+

config/setpath: Set the path of the config file

+

Parameters:

+
    +
  • path - path to the config file to use
  • +
+

Authentication is required for this call.

+

config/update: update the config for a remote.

+

This takes the following parameters:

+
    +
  • name - name of remote
  • +
  • parameters - a map of { "key": "value" } pairs
  • +
  • opt - a dictionary of options to control the configuration +
      +
    • obscure - declare passwords are plain and need obscuring
    • +
    • noObscure - declare passwords are already obscured and don't need obscuring
    • +
    • nonInteractive - don't interact with a user, return questions
    • +
    • continue - continue the config process with an answer
    • +
    • all - ask all the config questions not just the post config ones
    • +
    • state - state to restart with - used with continue
    • +
    • result - result to restart with - used with continue
    • +
    +
  • +
+

See the config update command for more information on the above.

+

Authentication is required for this call.

+

core/bwlimit: Set the bandwidth limit.

+

This sets the bandwidth limit to the string passed in. This should be +a single bandwidth limit entry or a pair of upload:download bandwidth.

+

Eg

+
rclone rc core/bwlimit rate=off
+{
+    "bytesPerSecond": -1,
+    "bytesPerSecondTx": -1,
+    "bytesPerSecondRx": -1,
+    "rate": "off"
+}
+rclone rc core/bwlimit rate=1M
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 1048576,
+    "rate": "1M"
+}
+rclone rc core/bwlimit rate=1M:100k
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 131072,
+    "rate": "1M"
+}
+
+

If the rate parameter is not supplied then the bandwidth is queried

+
rclone rc core/bwlimit
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 1048576,
+    "rate": "1M"
+}
+
+

The format of the parameter is exactly the same as passed to --bwlimit +except only one bandwidth may be specified.

+

In either case "rate" is returned as a human-readable string, and +"bytesPerSecond" is returned as a number.

+

core/command: Run a rclone terminal command over rc.

+

This takes the following parameters:

+
    +
  • command - a string with the command name.
  • +
  • arg - a list of arguments for the backend command.
  • +
  • opt - a map of string to string of options.
  • +
  • returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR"). +
      +
    • Defaults to "COMBINED_OUTPUT" if not set.
    • +
    • The STREAM returnTypes will write the output to the body of the HTTP message.
    • +
    • The COMBINED_OUTPUT will write the output to the "result" parameter.
    • +
    +
  • +
+

Returns:

+
    +
  • result - result from the backend command. +
      +
    • Only set when using returnType "COMBINED_OUTPUT".
    • +
    +
  • +
  • error - set if rclone exits with an error code.
  • +
  • returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
  • +
+

Example:

+
rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
+rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
+
+

Returns:

+
{
+	"error": false,
+	"result": "<Raw command line output>"
+}
+
+OR
+{
+	"error": true,
+	"result": "<Raw command line output>"
+}
+

Authentication is required for this call.

+

core/du: Returns disk usage of a locally attached disk.

+

This returns the disk usage for the local directory passed in as dir.

+

If the directory is not passed in, it defaults to the directory +pointed to by --cache-dir.

+
    +
  • dir - string (optional)
  • +
+

Returns:

+
{
+	"dir": "/",
+	"info": {
+		"Available": 361769115648,
+		"Free": 361785892864,
+		"Total": 982141468672
+	}
+}
+

core/gc: Runs a garbage collection.

+

This tells the go runtime to do a garbage collection run. It isn't +necessary to call this normally, but it can be useful for debugging +memory problems.

+

core/group-list: Returns list of stats.

+

This returns a list of stats groups currently in memory.

+

Returns the following values:

+
{
+	"groups":  an array of group names:
+		[
+			"group1",
+			"group2",
+			...
+		]
+}
+

core/memstats: Returns the memory statistics

+

This returns the memory statistics of the running program. What the values mean +are explained in the go docs: https://golang.org/pkg/runtime/#MemStats

+

The most interesting values for most people are:

+
    +
  • HeapAlloc - this is the amount of memory rclone is actually using
  • +
  • HeapSys - this is the amount of memory rclone has obtained from the OS
  • +
  • Sys - this is the total amount of memory requested from the OS +
      +
    • It is virtual memory so may include unused memory
    • +
    +
  • +
+

core/obscure: Obscures a string passed in.

+

Pass a clear string and rclone will obscure it for the config file:

+
    +
  • clear - string
  • +
+

Returns:

+
    +
  • obscured - string
  • +
+

core/pid: Return PID of current process

+

This returns the PID of the current process, which is useful for stopping the rclone process.

+

core/quit: Terminates the app.

+

(Optional) Pass an exit code to be used for terminating the app:

+
    +
  • exitCode - int
  • +
+

core/stats: Returns stats about current transfers.

+

This returns all available stats:

+
rclone rc core/stats
+
+

If group is not provided then summed up stats for all groups will be +returned.

+

Parameters

+
    +
  • group - name of the stats group (string)
  • +
+

Returns the following values:

+
{
+	"bytes": total transferred bytes since the start of the group,
+	"checks": number of files checked,
+	"deletes" : number of files deleted,
+	"elapsedTime": time in floating point seconds since rclone was started,
+	"errors": number of errors,
+	"eta": estimated time in seconds until the group completes,
+	"fatalError": boolean whether there has been at least one fatal error,
+	"lastError": last error string,
+	"renames" : number of files renamed,
+	"retryError": boolean showing whether there has been at least one non-NoRetryError,
+        "serverSideCopies": number of server side copies done,
+        "serverSideCopyBytes": number bytes server side copied,
+        "serverSideMoves": number of server side moves done,
+        "serverSideMoveBytes": number bytes server side moved,
+	"speed": average speed in bytes per second since start of the group,
+	"totalBytes": total number of bytes in the group,
+	"totalChecks": total number of checks in the group,
+	"totalTransfers": total number of transfers in the group,
+	"transferTime" : total time spent on running jobs,
+	"transfers": number of transferred files,
+	"transferring": an array of currently active file transfers:
+		[
+			{
+				"bytes": total transferred bytes for this file,
+				"eta": estimated time in seconds until file transfer completion
+				"name": name of the file,
+				"percentage": progress of the file transfer in percent,
+				"speed": average speed over the whole transfer in bytes per second,
+				"speedAvg": current speed in bytes per second as an exponentially weighted moving average,
+				"size": size of the file in bytes
+			}
+		],
+	"checking": an array of names of currently active file checks
+		[]
+}
+

Values for "transferring", "checking" and "lastError" are only assigned if data is available. +The value for "eta" is null if an eta cannot be determined.
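To read the stats for just one group rather than the summed stats, pass the group name (illustrative):

rclone rc core/stats group=job/1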

+

core/stats-delete: Delete stats group.

+

This deletes an entire stats group.

+

Parameters

+
    +
  • group - name of the stats group (string)
  • +
+

core/stats-reset: Reset stats.

+

This clears counters, errors and finished transfers for all stats or specific +stats group if group is provided.

+

Parameters

+
    +
  • group - name of the stats group (string)
  • +
+

core/transferred: Returns stats about completed transfers.

+

This returns stats about completed transfers:

+
rclone rc core/transferred
+
+

If group is not provided then completed transfers for all groups will be +returned.

+

Note only the last 100 completed transfers are returned.

+

Parameters

+
    +
  • group - name of the stats group (string)
  • +
+

Returns the following values:

+
{
+	"transferred":  an array of completed transfers (including failed ones):
+		[
+			{
+				"name": name of the file,
+				"size": size of the file in bytes,
+				"bytes": total transferred bytes for this file,
+				"checked": if the transfer is only checked (skipped, deleted),
+				"timestamp": integer representing millisecond unix epoch,
+				"error": string description of the error (empty if successful),
+				"jobid": id of the job that this transfer belongs to
+			}
+		]
+}
+

core/version: Shows the current version of rclone and the go runtime.

+

This shows the current version of rclone and the go runtime:

+
    +
  • version - rclone version, e.g. "v1.53.0"
  • +
  • decomposed - version number as [major, minor, patch]
  • +
  • isGit - boolean - true if this was compiled from the git version
  • +
  • isBeta - boolean - true if this is a beta version
  • +
  • os - OS in use as according to Go
  • +
  • arch - cpu architecture in use according to Go
  • +
  • goVersion - version of Go runtime in use
  • +
  • linking - type of rclone executable (static or dynamic)
  • +
  • goTags - space separated build tags or "none"
  • +
+

debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling.

+

SetBlockProfileRate controls the fraction of goroutine blocking events +that are reported in the blocking profile. The profiler aims to sample +an average of one blocking event per rate nanoseconds spent blocked.

+

To include every blocking event in the profile, pass rate = 1. To turn +off profiling entirely, pass rate <= 0.

+

After calling this you can use this to see the blocking profile:

+
go tool pprof http://localhost:5572/debug/pprof/block
+
+

Parameters:

+
    +
  • rate - int
  • +
+

debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage.

+

SetGCPercent sets the garbage collection target percentage: a collection is triggered +when the ratio of freshly allocated data to live data remaining after the previous collection +reaches this percentage. SetGCPercent returns the previous setting. The initial setting is the +value of the GOGC environment variable at startup, or 100 if the variable is not set.

+

This setting may be effectively reduced in order to maintain a memory limit. +A negative percentage effectively disables garbage collection, unless the memory limit is reached.

+

See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details.

+

Parameters:

+
    +
  • gc-percent - int
  • +
+

debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling.

+

SetMutexProfileFraction controls the fraction of mutex contention +events that are reported in the mutex profile. On average 1/rate +events are reported. The previous rate is returned.

+

To turn off profiling entirely, pass rate 0. To just read the current +rate, pass rate < 0. (For n>1 the details of sampling may change.)

+

Once this is set you can use this to profile the mutex contention:

+
go tool pprof http://localhost:5572/debug/pprof/mutex
+
+

Parameters:

+
    +
  • rate - int
  • +
+

Results:

+
    +
  • previousRate - int
  • +
+

debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime.

+

SetMemoryLimit provides the runtime with a soft memory limit.

+

The runtime undertakes several processes to try to respect this memory limit, including +adjustments to the frequency of garbage collections and returning memory to the underlying +system more aggressively. This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) is executed).

+

The input limit is provided as bytes, and includes all memory mapped, managed, and not +released by the Go runtime. Notably, it does not account for space used by the Go binary +and memory external to Go, such as memory managed by the underlying system on behalf of +the process, or memory managed by non-Go code inside the same process. +Examples of excluded memory sources include: OS kernel memory held on behalf of the process, +memory allocated by C code, and memory mapped by syscall.Mmap (because it is not managed by the Go runtime).

+

A zero limit or a limit that's lower than the amount of memory used by the Go runtime may cause +the garbage collector to run nearly continuously. However, the application may still make progress.

+

The memory limit is always respected by the Go runtime, so to effectively disable this behavior, +set the limit very high. math.MaxInt64 is the canonical value for disabling the limit, but values +much greater than the available memory on the underlying system work just as well.

+

See https://go.dev/doc/gc-guide for a detailed guide explaining the soft memory limit in more detail, +as well as a variety of common use-cases and scenarios.

+

SetMemoryLimit returns the previously set memory limit. A negative input does not adjust the limit, +and allows for retrieval of the currently set memory limit.

+

Parameters:

+
    +
  • mem-limit - int
  • +
+

fscache/clear: Clear the Fs cache.

+

This clears the fs cache. This is where remotes created from backends +are cached for a short while to make repeated rc calls more efficient.

+

If you change the parameters of a backend then you may want to call +this to clear an existing remote out of the cache before re-creating +it.

+

Authentication is required for this call.

+

fscache/entries: Returns the number of entries in the fs cache.

+

This returns the number of entries in the fs cache.

+

Returns

+
    +
  • entries - number of items in the cache
  • +
+

Authentication is required for this call.

+

job/list: Lists the IDs of the running jobs

+

Parameters: None.

+

Results:

+
    +
  • executeId - string id of rclone executing (change after restart)
  • +
  • jobids - array of integer job ids (starting at 1 on each restart)
  • +
+

job/status: Reads the status of the job ID

+

Parameters:

+
    +
  • jobid - id of the job (integer).
  • +
+

Results:

+
    +
  • duration - time in seconds that the job ran for
  • +
  • endTime - time the job finished (e.g. "2018-10-26T18:50:20.528746884+01:00")
  • +
  • error - error from the job or empty string for no error
  • +
  • finished - boolean whether the job has finished or not
  • +
  • id - as passed in above
  • +
  • startTime - time the job started (e.g. "2018-10-26T18:50:20.528336039+01:00")
  • +
  • success - boolean - true for success false otherwise
  • +
  • output - output of the job as would have been returned if called synchronously
  • +
  • progress - output of the progress related to the underlying job
  • +
+

job/stop: Stop the running job

+

Parameters:

+
    +
  • jobid - id of the job (integer).
  • +
+

job/stopgroup: Stop all running jobs in a group

+

Parameters:

+
    +
  • group - name of the group (string).
  • +
+
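For example (the job id and group name are illustrative):

rclone rc job/stop jobid=2
rclone rc job/stopgroup group=backup/manual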

mount/listmounts: Show current mount points

+

This shows currently mounted points, which can be used for performing an unmount.

+

This takes no parameters and returns

+
    +
  • mountPoints: list of current mount points
  • +
+

Eg

+
rclone rc mount/listmounts
+
+

Authentication is required for this call.

+

mount/mount: Create a new mount point

+

rclone allows Linux, FreeBSD, macOS and Windows to mount any of +Rclone's cloud storage systems as a file system with FUSE.

+

If no mountType is provided, the priority is given as follows: 1. mount, 2. cmount, 3. mount2.

+

This takes the following parameters:

+
    +
  • fs - a remote path to be mounted (required)
  • +
  • mountPoint: valid path on the local machine (required)
  • +
  • mountType: one of the values (mount, cmount, mount2) specifies the mount implementation to use
  • +
  • mountOpt: a JSON object with Mount options in.
  • +
  • vfsOpt: a JSON object with VFS options in.
  • +
+

Example:

+
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
+rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
+rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
+
+

The vfsOpt are as described in options/get and can be seen in the "vfs" section when running options/get; the mountOpt can be seen in the "mount" section:

+
rclone rc options/get
+
+

Authentication is required for this call.

+

mount/types: Show all possible mount types

+

This shows all possible mount types and returns them as a list.

+

This takes no parameters and returns

+
    +
  • mountTypes: list of mount types
  • +
+

The mount types are strings like "mount", "mount2", "cmount" and can +be passed to mount/mount as the mountType parameter.

+

Eg

+
rclone rc mount/types
+
+

Authentication is required for this call.

+

mount/unmount: Unmount selected active mount

+

rclone allows Linux, FreeBSD, macOS and Windows to +mount any of Rclone's cloud storage systems as a file system with +FUSE.

+

This takes the following parameters:

+
    +
  • mountPoint: valid path on the local machine where the mount was created (required)
  • +
+

Example:

+
rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
+
+

Authentication is required for this call.

+

mount/unmountall: Unmount all active mounts

+

rclone allows Linux, FreeBSD, macOS and Windows to +mount any of Rclone's cloud storage systems as a file system with +FUSE.

+

This takes no parameters and returns an error if the unmount does not succeed.

+

Eg

+
rclone rc mount/unmountall
+
+

Authentication is required for this call.

+

operations/about: Return the space used on the remote

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

The result is as returned from rclone about --json

+

See the about command for more information on the above.

+

Authentication is required for this call.

+

operations/check: check the source and destination are the same

+

Checks the files in the source and destination match. It compares +sizes and hashes and logs a report of files that don't +match. It doesn't alter the source or destination.

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • +
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • +
  • download - check by downloading rather than with hash
  • +
  • checkFileHash - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • +
  • checkFileFs - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • +
  • checkFileRemote - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • +
  • oneWay - check one way only, source files must exist on remote
  • +
  • combined - make a combined report of changes (default false)
  • +
  • missingOnSrc - report all files missing from the source (default true)
  • +
  • missingOnDst - report all files missing from the destination (default true)
  • +
  • match - report all matching files (default false)
  • +
  • differ - report all non-matching files (default true)
  • +
  • error - report all files with errors (hashing or reading) (default true)
  • +
+

If you supply the download flag, it will download the data from +both remotes and check them against each other on the fly. This can +be useful for remotes that don't support hashes or if you really want +to check all the data.

+

If you supply the size-only global flag, it will only compare the sizes not +the hashes as well. Use this for a quick check.

+

If you supply the checkFileHash option with a valid hash name, the +checkFileFs:checkFileRemote must point to a text file in the SUM +format. This treats the checksum file as the source and dstFs as the +destination. Note that srcFs is not used and should not be supplied in +this case.

+

Returns:

+
    +
  • success - true if no error, false otherwise
  • +
  • status - textual summary of check, OK or text string
  • +
  • hashType - hash used in check, may be missing
  • +
  • combined - array of strings of combined report of changes
  • +
  • missingOnSrc - array of strings of all files missing from the source
  • +
  • missingOnDst - array of strings of all files missing from the destination
  • +
  • match - array of strings of all matching files
  • +
  • differ - array of strings of all non-matching files
  • +
  • error - array of strings of all files with errors (hashing or reading)
  • +
+

Authentication is required for this call.

+

operations/cleanup: Remove trashed files in the remote or path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the cleanup command for more information on the above.

+

Authentication is required for this call.

+

operations/copyfile: Copy a file from source remote to destination remote

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • +
  • srcRemote - a path within that remote e.g. "file.txt" for the source
  • +
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • +
  • dstRemote - a path within that remote e.g. "file2.txt" for the destination
  • +
+

Authentication is required for this call.

+

operations/copyurl: Copy the URL to the object

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
  • url - string, URL to read from
  • +
  • autoFilename - boolean, set to true to retrieve destination file name from url
  • +
+

See the copyurl command for more information on the above.
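For example (the URL and destination are illustrative):

rclone rc operations/copyurl fs=remote: remote=dir url=https://example.com/file.txt autoFilename=true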

+

Authentication is required for this call.

+

operations/delete: Remove files in the path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the delete command for more information on the above.

+

Authentication is required for this call.

+

operations/deletefile: Remove the single file pointed to

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the deletefile command for more information on the above.

+

Authentication is required for this call.

+

operations/fsinfo: Return information about the remote

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

This returns info about the remote passed in:

+
{
+        // optional features and whether they are available or not
+        "Features": {
+                "About": true,
+                "BucketBased": false,
+                "BucketBasedRootOK": false,
+                "CanHaveEmptyDirectories": true,
+                "CaseInsensitive": false,
+                "ChangeNotify": false,
+                "CleanUp": false,
+                "Command": true,
+                "Copy": false,
+                "DirCacheFlush": false,
+                "DirMove": true,
+                "Disconnect": false,
+                "DuplicateFiles": false,
+                "GetTier": false,
+                "IsLocal": true,
+                "ListR": false,
+                "MergeDirs": false,
+                "MetadataInfo": true,
+                "Move": true,
+                "OpenWriterAt": true,
+                "PublicLink": false,
+                "Purge": true,
+                "PutStream": true,
+                "PutUnchecked": false,
+                "ReadMetadata": true,
+                "ReadMimeType": false,
+                "ServerSideAcrossConfigs": false,
+                "SetTier": false,
+                "SetWrapper": false,
+                "Shutdown": false,
+                "SlowHash": true,
+                "SlowModTime": false,
+                "UnWrap": false,
+                "UserInfo": false,
+                "UserMetadata": true,
+                "WrapFs": false,
+                "WriteMetadata": true,
+                "WriteMimeType": false
+        },
+        // Names of hashes available
+        "Hashes": [
+                "md5",
+                "sha1",
+                "whirlpool",
+                "crc32",
+                "sha256",
+                "dropbox",
+                "mailru",
+                "quickxor"
+        ],
+        "Name": "local",        // Name as created
+        "Precision": 1,         // Precision of timestamps in ns
+        "Root": "/",            // Path as created
+        "String": "Local file system at /", // how the remote will appear in logs
+        // Information about the system metadata for this backend
+        "MetadataInfo": {
+                "System": {
+                        "atime": {
+                                "Help": "Time of last access",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "btime": {
+                                "Help": "Time of file birth (creation)",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "gid": {
+                                "Help": "Group ID of owner",
+                                "Type": "decimal number",
+                                "Example": "500"
+                        },
+                        "mode": {
+                                "Help": "File type and mode",
+                                "Type": "octal, unix style",
+                                "Example": "0100664"
+                        },
+                        "mtime": {
+                                "Help": "Time of last modification",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "rdev": {
+                                "Help": "Device ID (if special file)",
+                                "Type": "hexadecimal",
+                                "Example": "1abc"
+                        },
+                        "uid": {
+                                "Help": "User ID of owner",
+                                "Type": "decimal number",
+                                "Example": "500"
+                        }
+                },
+                "Help": "Textual help string\n"
+        }
+}
+

This command does not have a command line equivalent so use this instead:

+
rclone rc --loopback operations/fsinfo fs=remote:
+
+

operations/hashsum: Produces a hashsum file for all the objects in the path.

+

Produces a hash file for all the objects in the path using the hash +named. The output is in the same format as the standard +md5sum/sha1sum tool.

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:" for the source, "/" for local filesystem +
      +
    • this can point to a file and just that file will be returned in the listing.
    • +
    +
  • +
  • hashType - type of hash to be used
  • +
  • download - check by downloading rather than with hash (boolean)
  • +
  • base64 - output the hashes in base64 rather than hex (boolean)
  • +
+

If you supply the download flag, it will download the data from the +remote and create the hash on the fly. This can be useful for remotes +that don't support the given hash or if you really want to check all +the data.

+

Note that if you wish to supply a checkfile to check hashes against +the current files then you should use operations/check instead of +operations/hashsum.

+

Returns:

+
    +
  • hashsum - array of strings of the hashes
  • +
  • hashType - type of hash used
  • +
+

Example:

+
$ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
+{
+    "hashType": "md5",
+    "hashsum": [
+        "WTSVLpuiXyJO_kGzJerRLg==  backend-versions.sh",
+        "v1b_OlWCJO9LtNq3EIKkNQ==  bisect-go-rclone.sh",
+        "VHbmHzHh4taXzgag8BAIKQ==  bisect-rclone.sh",
+    ]
+}
+
+

See the hashsum command for more information on the above.

+

Authentication is required for this call.

+

operations/list: List the given remote and path in JSON format

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
  • opt - a dictionary of options to control the listing (optional) +
      +
    • recurse - If set recurse directories
    • +
    • noModTime - If set return modification time
    • +
    • showEncrypted - If set show decrypted names
    • +
    • showOrigIDs - If set show the IDs for each item if known
    • +
    • showHash - If set return a dictionary of hashes
    • +
    • noMimeType - If set don't show mime types
    • +
    • dirsOnly - If set only show directories
    • +
    • filesOnly - If set only show files
    • +
    • metadata - If set return metadata of objects also
    • +
    • hashTypes - array of strings of hash types to show if showHash set
    • +
    +
  • +
+

Returns:

+
    +
  • list +
      +
    • This is an array of objects as described in the lsjson command
    • +
    +
  • +
+

See the lsjson command for more information on the above and examples.
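For example, a recursive listing of directories only (paths illustrative):

rclone rc operations/list fs=remote: remote=dir opt='{"recurse": true, "dirsOnly": true}'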

+

Authentication is required for this call.

+

operations/mkdir: Make a destination directory or container

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the mkdir command for more information on the above.

+

Authentication is required for this call.

+

operations/movefile: Move a file from source remote to destination remote

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • +
  • srcRemote - a path within that remote e.g. "file.txt" for the source
  • +
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • +
  • dstRemote - a path within that remote e.g. "file2.txt" for the destination
  • +
+

Authentication is required for this call.

operations/publiclink: Create or retrieve a public link to the given file or directory.

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
  • unlink - boolean - if set removes the link rather than adding it (optional)
  • +
  • expire - string - the expiry time of the link e.g. "1d" (optional)
  • +
+

Returns:

+
    +
  • url - URL of the resource
  • +
+

See the link command for more information on the above.
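For example (the path and expiry are illustrative):

rclone rc operations/publiclink fs=remote: remote=file.txt expire=1d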

+

Authentication is required for this call.

+

operations/purge: Remove a directory or container and all of its contents

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the purge command for more information on the above.

+

Authentication is required for this call.

+

operations/rmdir: Remove an empty directory or container

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the rmdir command for more information on the above.

+

Authentication is required for this call.

+

operations/rmdirs: Remove all the empty directories in the path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
  • leaveRoot - boolean, set to true not to delete the root
  • +
+

See the rmdirs command for more information on the above.

+

Authentication is required for this call.

+

operations/settier: Changes storage tier or class on all files in the path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the settier command for more information on the above.

+

Authentication is required for this call.

+

operations/settierfile: Changes storage tier or class on the single file pointed to

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the settierfile command for more information on the above.

+

Authentication is required for this call.

+

operations/size: Count the number of bytes and files in remote

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:path/to/dir"
  • +
+

Returns:

+
    +
  • count - number of files
  • +
  • bytes - number of bytes in those files
  • +
+

See the size command for more information on the above.

+

Authentication is required for this call.

+

operations/stat: Give information about the supplied file or directory

+

This takes the following parameters

+
    +
  • fs - a remote name string eg "drive:"
  • +
  • remote - a path within that remote eg "dir"
  • +
  • opt - a dictionary of options to control the listing (optional) +
      +
    • see operations/list for the options
    • +
    +
  • +
+

The result is

+
    +
  • item - an object as described in the lsjson command. Will be null if not found.
  • +
+

Note that if you are only interested in files then it is much more +efficient to set the filesOnly flag in the options.

+

See the lsjson command for more information on the above and examples.

+

Authentication is required for this call.

+

operations/uploadfile: Upload file using multipart/form-data

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
  • each part in body represents a file to be uploaded
  • +
+

See the uploadfile command for more information on the above.

+

Authentication is required for this call.

+

options/blocks: List all the option blocks

+

Returns:

+
    +
  • options - a list of the options block names
  • +
+

options/get: Get all the global options

+

Returns an object where keys are option block names and values are an +object with the current option values in.

+

Parameters:

+
    +
  • blocks: optional string of comma separated blocks to include +
      +
    • all are included if this is missing or ""
    • +
    +
  • +
+

Note that these are the global options which are unaffected by use of +the _config and _filter parameters. If you wish to read the parameters +set in _config then use options/config and for _filter use options/filter.

+

This shows the internal names of the option within rclone which should +map to the external options very easily with a few exceptions.
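For example, to fetch only the main and vfs option blocks:

rclone rc options/get blocks=main,vfs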

+

options/info: Get info about all the global options

+

Returns an object where keys are option block names and values are an array of objects with info about each option.

+

Parameters:

+
    +
  • blocks: optional string of comma separated blocks to include +
      +
    • all are included if this is missing or ""
    • +
    +
  • +
+

These objects are in the same format as returned by "config/providers". They are +described in the option blocks section.

+

options/local: Get the currently active config for this call

+

Returns an object with the keys "config" and "filter". +The "config" key contains the local config and the "filter" key contains +the local filters.

+

Note that these are the local options specific to this rc call. If +_config was not supplied then they will be the global options. +Likewise with "_filter".

+

This call is mostly useful for seeing if _config and _filter passing +is working.

+

This shows the internal names of the option within rclone which should +map to the external options very easily with a few exceptions.

+

options/set: Set an option

+

Parameters:

+
    +
  • option block name containing an object with +
      +
    • key: value
    • +
    +
  • +
+

Repeated as often as required.

+

Only supply the options you wish to change. If an option is unknown +it will be silently ignored. Not all options will have an effect when +changed like this.

+

For example:

+

This sets DEBUG level logs (-vv) (these can be set by number or string)

+
rclone rc options/set --json '{"main": {"LogLevel": "DEBUG"}}'
+rclone rc options/set --json '{"main": {"LogLevel": 8}}'
+
+

And this sets INFO level logs (-v)

+
rclone rc options/set --json '{"main": {"LogLevel": "INFO"}}'
+
+

And this sets NOTICE level logs (normal without -v)

+
rclone rc options/set --json '{"main": {"LogLevel": "NOTICE"}}'
+
+

pluginsctl/addPlugin: Add a plugin using url

+

Used for adding a plugin to the webgui.

+

This takes the following parameters:

+
    +
  • url - http url of the plugin
  • +
+

Example:

+

rclone rc pluginsctl/addPlugin

+

Authentication is required for this call.

+

pluginsctl/getPluginsForType: Get plugins with type criteria

+

This shows all possible plugins by a mime type.

+

This takes the following parameters:

+
    +
  • type - supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3).
  • +
  • pluginType - filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL).
  • +
+

Returns:

+
    +
  • loadedPlugins - list of current production plugins.
  • +
  • testPlugins - list of temporarily loaded development plugins, usually running on a different server.
  • +
+

Example:

+

rclone rc pluginsctl/getPluginsForType type=video/mp4

+

Authentication is required for this call.

+

pluginsctl/listPlugins: Get the list of currently loaded plugins

+

This allows you to get the currently enabled plugins and their details.

+

This takes no parameters and returns:

+
    +
  • loadedPlugins - list of current production plugins.
  • +
  • testPlugins - list of temporarily loaded development plugins, usually running on a different server.
  • +
+

E.g.

+

rclone rc pluginsctl/listPlugins

+

Authentication is required for this call.

+

pluginsctl/listTestPlugins: Show currently loaded test plugins

+

Allows listing of test plugins with the rclone.test set to true in package.json of the plugin.

+

This takes no parameters and returns:

+
    +
  • loadedTestPlugins - list of currently available test plugins.
  • +
+

E.g.

+
rclone rc pluginsctl/listTestPlugins
+
+

Authentication is required for this call.

+

pluginsctl/removePlugin: Remove a loaded plugin

+

This allows you to remove a plugin using its name.

+

This takes parameters:

+
    +
  • name - name of the plugin in the format author/plugin_name.
  • +
+

E.g.

+

rclone rc pluginsctl/removePlugin name=rclone/video-plugin

+

Authentication is required for this call.

+

pluginsctl/removeTestPlugin: Remove a test plugin

+

This allows you to remove a plugin using its name.

+

This takes the following parameters:

+
    +
  • name - name of the plugin in the format author/plugin_name.
  • +
+

Example:

+
rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react
+
+

Authentication is required for this call.

+

rc/error: This returns an error

+

This returns an error with the input as part of its error string. +Useful for testing error handling.

+

rc/list: List all the registered remote control commands

+

This lists all the registered remote control commands as a JSON map in +the commands response.

+

rc/noop: Echo the input to the output parameters

+

This echoes the input parameters to the output parameters for testing +purposes. It can be used to check that rclone is still alive and to +check that parameter passing is working properly.

+

rc/noopauth: Echo the input to the output parameters requiring auth

+

This echoes the input parameters to the output parameters for testing +purposes. It can be used to check that rclone is still alive and to +check that parameter passing is working properly.

+

Authentication is required for this call.

+

sync/bisync: Perform bidirectional synchronization between two paths.

+

This takes the following parameters

+
    +
  • path1 - a remote directory string e.g. drive:path1
  • +
  • path2 - a remote directory string e.g. drive:path2
  • +
  • dryRun - dry-run mode
  • +
  • resync - performs the resync run
  • +
  • checkAccess - abort if RCLONE_TEST files are not found on both filesystems
  • +
  • checkFilename - file name for checkAccess (default: RCLONE_TEST)
  • +
  • maxDelete - abort sync if percentage of deleted files is above +this threshold (default: 50)
  • +
  • force - Bypass maxDelete safety check and run the sync
  • +
  • checkSync - true by default; false disables comparison of final listings; +"only" will skip the sync and only compare listings from the last run
  • +
  • createEmptySrcDirs - Sync creation and deletion of empty directories. +(Not compatible with --remove-empty-dirs)
  • +
  • removeEmptyDirs - remove empty directories at the final cleanup step
  • +
  • filtersFile - read filtering patterns from a file
  • +
  • ignoreListingChecksum - Do not use checksums for listings
  • +
  • resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. +Use at your own risk!
  • +
  • workdir - server directory for history files (default: ~/.cache/rclone/bisync)
  • +
  • backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
  • +
  • backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
  • +
  • noCleanup - retain working files
  • +
+

See bisync command help +and full bisync description +for more information.

+

Authentication is required for this call.

+
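
For example, a safe first test of bisync between two remotes (the paths are illustrative):

rclone rc sync/bisync path1=drive:folderA path2=onedrive:folderB dryRun=true

+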

sync/copy: copy a directory from source remote to destination remote

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:src" for the source
  • +
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • +
  • createEmptySrcDirs - create empty src directories on destination if set
  • +
+

See the copy command for more information on the above.

+

Authentication is required for this call.

+
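
sync/copy can also be combined with the global _async and _group job parameters described earlier in this document, which is how a copy is run as a background job that can be tracked via job/status (the paths and group name here are illustrative):

rclone rc sync/copy srcFs=/mnt/user/appdata dstFs=remote:backup _async=true _group=backup/manual

With _async=true the call returns immediately with a jobid rather than blocking until the copy finishes.

+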

sync/move: move a directory from source remote to destination remote

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:src" for the source
  • +
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • +
  • createEmptySrcDirs - create empty src directories on destination if set
  • +
  • deleteEmptySrcDirs - delete empty src directories if set
  • +
+

See the move command for more information on the above.

+

Authentication is required for this call.

+

sync/sync: sync a directory from source remote to destination remote

+

This takes the following parameters:

+
    +
  • srcFs - a remote name string e.g. "drive:src" for the source
  • +
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • +
  • createEmptySrcDirs - create empty src directories on destination if set
  • +
+

See the sync command for more information on the above.

+

Authentication is required for this call.

+

vfs/forget: Forget files or directories in the directory cache.

+

This forgets the paths in the directory cache causing them to be +re-read from the remote when needed.

+

If no paths are passed in then it will forget all the paths in the +directory cache.

+
rclone rc vfs/forget
+
+

Otherwise pass files or dirs in as file=path or dir=path. Any +parameter key starting with file will forget that file and any +starting with dir will forget that dir, e.g.

+
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
+
+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+

vfs/list: List active VFSes.

+

This lists the active VFSes.

+

It returns a list under the key "vfses" where the values are the VFS +names that could be passed to the other VFS commands in the "fs" +parameter.

+
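
E.g.

rclone rc vfs/list

which returns something like {"vfses": ["remote:"]} (the value is illustrative).

+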

vfs/poll-interval: Get the status or update the value of the poll-interval option.

+

Without any parameter given this returns the current status of the +poll-interval setting.

+

When the interval=duration parameter is set, the poll-interval value +is updated and the polling function is notified. +Setting interval=0 disables poll-interval.

+
rclone rc vfs/poll-interval interval=5m
+
+

The timeout=duration parameter can be used to specify a time to wait +for the current poll function to apply the new value. +If the timeout is less than or equal to 0, which is the default, it waits indefinitely.

+

The new poll-interval value will only be active when the timeout is +not reached.

+

If poll-interval is updated or disabled temporarily, some changes +might not get picked up by the polling function, depending on the +used remote.

+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+

vfs/queue: Queue info for a VFS.

+

This returns info about the upload queue for the selected VFS.

+

This is only useful if --vfs-cache-mode > off. If you call it when +the --vfs-cache-mode is off, it will return an empty result.

+
{
+    "queued": // an array of files queued for upload
+    [
+        {
+            "name":      "file",   // string: name (full path) of the file,
+            "id":        123,      // integer: id of this item in the queue,
+            "size":      79,       // integer: size of the file in bytes
+            "expiry":    1.5,      // float: time until file is eligible for transfer, lowest goes first
+            "tries":     1,        // integer: number of times we have tried to upload
+            "delay":     5.0,      // float: seconds between upload attempts
+            "uploading": false,    // boolean: true if item is being uploaded
+        },
+   ],
+}
+
+

The expiry time is the time until the file is eligible for being +uploaded in floating point seconds. This may go negative. As rclone +only transfers --transfers files at once, only the lowest +--transfers expiry times will have uploading as true. So there +may be files with negative expiry times for which uploading is +false.

+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+

vfs/queue-set-expiry: Set the expiry time for an item queued for upload.

+

Use this to adjust the expiry time for an item in the upload queue. +You will need to read the id of the item using vfs/queue before +using this call.

+

You can then set expiry to a floating point number of seconds from +now when the item is eligible for upload. If you want the item to be +uploaded as soon as possible then set it to a large negative number (e.g. +-1000000000). If you want the upload of the item to be delayed +for a long time then set it to a large positive number.

+

Setting the expiry of an item which has already started uploading +will have no effect - the item will carry on being uploaded.

+

This will return an error if called with --vfs-cache-mode off or if +the id passed is not found.

+

This takes the following parameters

+
    +
  • fs - select the VFS in use (optional)
  • +
  • id - a numeric ID as returned from vfs/queue
  • +
  • expiry - a new expiry time as floating point seconds
  • +
  • relative - if set, expiry is to be treated as relative to the current expiry (optional, boolean)
  • +
+

This returns an empty result on success, or an error.

+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+
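
For example, to push the item with id 123 (as read from vfs/queue) to the front of the queue:

rclone rc vfs/queue-set-expiry fs=remote: id=123 expiry=-1000000000

+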

vfs/refresh: Refresh the directory cache.

+

This reads the directories for the specified paths and freshens the +directory cache.

+

If no paths are passed in then it will refresh the root directory.

+
rclone rc vfs/refresh
+
+

Otherwise pass directories in as dir=path. Any parameter key +starting with dir will refresh that directory, e.g.

+
rclone rc vfs/refresh dir=home/junk dir2=data/misc
+
+

If the parameter recursive=true is given the whole directory tree +will get refreshed. This refresh will use --fast-list if enabled.

+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+

vfs/stats: Stats for a VFS.

+

This returns stats for the selected VFS.

+
{
+    // Status of the disk cache - only present if --vfs-cache-mode > off
+    "diskCache": {
+        "bytesUsed": 0,
+        "erroredFiles": 0,
+        "files": 0,
+        "hashType": 1,
+        "outOfSpace": false,
+        "path": "/home/user/.cache/rclone/vfs/local/mnt/a",
+        "pathMeta": "/home/user/.cache/rclone/vfsMeta/local/mnt/a",
+        "uploadsInProgress": 0,
+        "uploadsQueued": 0
+    },
+    "fs": "/mnt/a",
+    "inUse": 1,
+    // Status of the in memory metadata cache
+    "metadataCache": {
+        "dirs": 1,
+        "files": 0
+    },
+    // Options as returned by options/get
+    "opt": {
+        "CacheMaxAge": 3600000000000,
+        // ...
+        "WriteWait": 1000000000
+    }
+}
+
+

This command takes an "fs" parameter. If this parameter is not +supplied and if there is only one VFS in use then that VFS will be +used. If there is more than one VFS in use then the "fs" parameter +must be supplied.

+ +

Accessing the remote control via HTTP

+

Rclone implements a simple HTTP based protocol.

+

Each endpoint takes a JSON object and returns a JSON object or an +error. The JSON objects are essentially a map of string names to +values.

+

All calls must be made using POST.

+

The input objects can be supplied using URL parameters, POST +parameters or by supplying "Content-Type: application/json" and a JSON +blob in the body. There are examples of these below using curl.

+

The response will be a JSON blob in the body of the response. This is +formatted to be reasonably human-readable.

+

Error returns

+

If an error occurs then there will be an HTTP error status (e.g. 500) +and the body of the response will contain a JSON encoded error object, +e.g.

+
{
+    "error": "Expecting string value for key \"remote\" (was float64)",
+    "input": {
+        "fs": "/tmp",
+        "remote": 3
+    },
+    "path": "operations/rmdir",
+    "status": 400
+}
+

The keys in the error response are

+
    +
  • error - error string
  • +
  • input - the input parameters to the call
  • +
  • status - the HTTP status code
  • +
  • path - the path of the call
  • +
+

CORS

+

The server implements basic CORS support and allows all origins. +The response to a preflight OPTIONS request will echo the requested "Access-Control-Request-Headers" back.

+
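
A preflight request can be exercised with curl (the origin here is illustrative):

curl -X OPTIONS -H "Origin: http://example.com" -H "Access-Control-Request-Method: POST" -H "Access-Control-Request-Headers: Content-Type" http://localhost:5572/rc/noop

The response should echo Content-Type back in the Access-Control-Allow-Headers header.

+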

Using POST with URL parameters only

+
curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2'
+

Response

+
{
+	"potato": "1",
+	"sausage": "2"
+}
+

Here is what an error response looks like:

+
curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
+
{
+	"error": "arbitrary error on input map[potato:1 sausage:2]",
+	"input": {
+		"potato": "1",
+		"sausage": "2"
+	}
+}
+

Note that curl doesn't return errors to the shell unless you use the -f option

+
$ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
+curl: (22) The requested URL returned error: 400 Bad Request
+$ echo $?
+22
+

Using POST with a form

+
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop
+

Response

+
{
+	"potato": "1",
+	"sausage": "2"
+}
+

Note that you can combine these with URL parameters too with the POST +parameters taking precedence.

+
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4"
+

Response

+
{
+	"potato": "1",
+	"rutabaga": "3",
+	"sausage": "4"
+}
+

Using POST with a JSON blob

+
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop
+

Response

+
{
+	"potato": 2,
+	"sausage": 1
+}
+

This can be combined with URL parameters too if required. The JSON +blob takes precedence.

+
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4'
+
{
+	"potato": 2,
+	"rutabaga": "3",
+	"sausage": 1
+}
+

Debugging rclone with pprof

+

If you use the --rc flag this will also enable the use of the go +profiling tools on the same port.

+

To use these, first install Go.

+

Debugging memory use

+

To profile rclone's memory use you can run:

+
go tool pprof -web http://localhost:5572/debug/pprof/heap
+
+

This should open a page in your browser showing what is using what +memory.

+

You can also use the -text flag to produce a textual summary

+
$ go tool pprof -text http://localhost:5572/debug/pprof/heap
+Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
+      flat  flat%   sum%        cum   cum%
+ 1024.03kB 66.62% 66.62%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.addDecoderNode
+     513kB 33.38%   100%      513kB 33.38%  net/http.newBufioWriterSize
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/all.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/serve.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/serve/restic.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.init.0
+         0     0%   100%  1024.03kB 66.62%  main.init
+         0     0%   100%      513kB 33.38%  net/http.(*conn).readRequest
+         0     0%   100%      513kB 33.38%  net/http.(*conn).serve
+         0     0%   100%  1024.03kB 66.62%  runtime.main
+

Debugging go routine leaks

+

Memory leaks are most often caused by go routine leaks keeping memory +alive which should have been garbage collected.

+

See all active go routines using

+
curl http://localhost:5572/debug/pprof/goroutine?debug=1
+
+

Or go to http://localhost:5572/debug/pprof/goroutine?debug=1 in your browser.

+

Other profiles to look at

+

You can see a summary of profiles available at http://localhost:5572/debug/pprof/

+

Here is how to use some of them:

+
    +
  • Memory: go tool pprof http://localhost:5572/debug/pprof/heap
  • +
  • Go routines: curl http://localhost:5572/debug/pprof/goroutine?debug=1
  • +
  • 30-second CPU profile: go tool pprof http://localhost:5572/debug/pprof/profile
  • +
  • 5-second execution trace: wget http://localhost:5572/debug/pprof/trace?seconds=5
  • +
  • Goroutine blocking profile +
      +
    • Enable first with: rclone rc debug/set-block-profile-rate rate=1 (docs)
    • +
    • go tool pprof http://localhost:5572/debug/pprof/block
    • +
    +
  • +
  • Contended mutexes: +
      +
    • Enable first with: rclone rc debug/set-mutex-profile-fraction rate=1 (docs)
    • +
    • go tool pprof http://localhost:5572/debug/pprof/mutex
    • +
    +
  • +
+

See the net/http/pprof docs +for more info on how to use the profiling and for a general overview +see the Go team's blog post on profiling go programs.

+

The profiling hook is zero overhead unless it is used.

+ + + + + + + + \ No newline at end of file diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts index c8af35aa4..46e870b84 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts @@ -10,12 +10,18 @@ import got, { HTTPError } from 'got'; import pRetry from 'p-retry'; import { sanitizeParams } from '@app/core/log.js'; +import { FormatService } from '@app/unraid-api/graph/resolvers/backup/format.service.js'; import { CreateRCloneRemoteDto, DeleteRCloneRemoteDto, GetRCloneJobStatusDto, GetRCloneRemoteConfigDto, GetRCloneRemoteDetailsDto, + RCloneJobListResponse, + RCloneJobStats, + RCloneJobStatusResponse, + RCloneJobsWithStatsResponse, + RCloneJobWithStats, RCloneProviderOptionResponse, RCloneProviderResponse, RCloneRemoteConfig, @@ -35,21 +41,18 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { process.env.RCLONE_USERNAME || crypto.randomBytes(12).toString('base64'); private readonly rclonePassword: string = process.env.RCLONE_PASSWORD || crypto.randomBytes(24).toString('base64'); - constructor() {} + constructor(private readonly formatService: FormatService) {} async onModuleInit(): Promise { try { const { getters } = await import('@app/store/index.js'); - // Check if Rclone Socket is running, if not, start it. this.rcloneSocketPath = getters.paths()['rclone-socket']; const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log'); this.logger.log(`RClone socket path: ${this.rcloneSocketPath}`); this.logger.log(`RClone log file path: ${logFilePath}`); - // Format the base URL for Unix socket this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`; - // Check if the RClone socket exists, if not, create it. const socketExists = await this.checkRcloneSocketExists(this.rcloneSocketPath); if (socketExists) { @@ -83,19 +86,14 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { this.logger.log('RCloneApiService module destroyed'); } - /** - * Starts the RClone RC daemon on the specified socket path - */ private async startRcloneSocket(socketPath: string, logFilePath: string): Promise { try { - // Make log file exists if (!existsSync(logFilePath)) { - this.logger.debug(`Creating log file: ${logFilePath}`); await mkdir(dirname(logFilePath), { recursive: true }); await writeFile(logFilePath, '', 'utf-8'); } this.logger.log(`Starting RClone RC daemon on socket: ${socketPath}`); - // Start the process but don't wait for it to finish + this.rcloneProcess = execa( 'rclone', [ @@ -109,17 +107,15 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { ...(this.rcloneUsername ? ['--rc-user', this.rcloneUsername] : []), ...(this.rclonePassword ? 
['--rc-pass', this.rclonePassword] : []), ], - { detached: false } // Keep attached to manage lifecycle + { detached: false } ); - // Handle potential errors during process spawning (e.g., command not found) this.rcloneProcess.on('error', (error: Error) => { this.logger.error(`RClone process failed to start: ${error.message}`); - this.rcloneProcess = null; // Clear the handle on error + this.rcloneProcess = null; this.isInitialized = false; }); - // Handle unexpected exit this.rcloneProcess.on('exit', (code, signal) => { this.logger.warn( `RClone process exited unexpectedly with code: ${code}, signal: ${signal}` @@ -128,14 +124,13 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { this.isInitialized = false; }); - // Wait for socket to be ready using p-retry with exponential backoff await pRetry( async () => { const isRunning = await this.checkRcloneSocketRunning(); if (!isRunning) throw new Error('Rclone socket not ready'); }, { - retries: 6, // 7 attempts total + retries: 6, minTimeout: 100, maxTimeout: 5000, factor: 2, @@ -146,7 +141,7 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return true; } catch (error: unknown) { this.logger.error(`Error starting RClone RC daemon: ${error}`); - this.rcloneProcess?.kill(); // Attempt to kill if started but failed later + this.rcloneProcess?.kill(); this.rcloneProcess = null; return false; } @@ -156,22 +151,21 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { if (this.rcloneProcess && !this.rcloneProcess.killed) { this.logger.log(`Stopping RClone RC daemon process (PID: ${this.rcloneProcess.pid})...`); try { - const killed = this.rcloneProcess.kill('SIGTERM'); // Send SIGTERM first + const killed = this.rcloneProcess.kill('SIGTERM'); if (!killed) { this.logger.warn('Failed to kill RClone process with SIGTERM, trying SIGKILL.'); - this.rcloneProcess.kill('SIGKILL'); // Force kill if SIGTERM failed + this.rcloneProcess.kill('SIGKILL'); } this.logger.log('RClone process stopped.'); } catch (error: unknown) { this.logger.error(`Error stopping RClone process: ${error}`); } finally { - this.rcloneProcess = null; // Clear the handle + this.rcloneProcess = null; } } else { this.logger.log('RClone process not running or already stopped.'); } - // Clean up the socket file if it exists if (this.rcloneSocketPath && existsSync(this.rcloneSocketPath)) { this.logger.log(`Removing RClone socket file: ${this.rcloneSocketPath}`); try { @@ -182,9 +176,6 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { } } - /** - * Checks if the RClone socket exists - */ private async checkRcloneSocketExists(socketPath: string): Promise { const socketExists = existsSync(socketPath); if (!socketExists) { @@ -194,27 +185,15 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return true; } - /** - * Checks if the RClone socket is running - */ private async checkRcloneSocketRunning(): Promise { - // Use the API check instead of execa('rclone', ['about']) as rclone might not be in PATH - // or configured correctly for the execa environment vs the rcd environment. 
try { - // A simple API call to check if the daemon is responsive await this.callRcloneApi('core/pid'); - this.logger.debug('RClone socket is running and responsive.'); return true; } catch (error: unknown) { - // Log less verbosely during checks - // this.logger.error(`Error checking RClone socket: ${error}`); return false; } } - /** - * Get providers supported by RClone - */ async getProviders(): Promise { const response = (await this.callRcloneApi('config/providers')) as { providers: RCloneProviderResponse[]; @@ -222,34 +201,22 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return response?.providers || []; } - /** - * List all remotes configured in rclone - */ async listRemotes(): Promise { const response = (await this.callRcloneApi('config/listremotes')) as { remotes: string[] }; return response?.remotes || []; } - /** - * Get complete remote details - */ async getRemoteDetails(input: GetRCloneRemoteDetailsDto): Promise { await validateObject(GetRCloneRemoteDetailsDto, input); const config = (await this.getRemoteConfig({ name: input.name })) || {}; return config as RCloneRemoteConfig; } - /** - * Get configuration of a remote - */ async getRemoteConfig(input: GetRCloneRemoteConfigDto): Promise { await validateObject(GetRCloneRemoteConfigDto, input); return this.callRcloneApi('config/get', { name: input.name }); } - /** - * Create a new remote configuration - */ async createRemote(input: CreateRCloneRemoteDto): Promise { await validateObject(CreateRCloneRemoteDto, input); this.logger.log(`Creating new remote: ${input.name} of type: ${input.type}`); @@ -263,9 +230,6 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return result; } - /** - * Update an existing remote configuration - */ async updateRemote(input: UpdateRCloneRemoteDto): Promise { await validateObject(UpdateRCloneRemoteDto, input); this.logger.log(`Updating remote: ${input.name}`); @@ -276,55 +240,224 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return this.callRcloneApi('config/update', params); } - /** - * Delete a remote configuration - */ async deleteRemote(input: DeleteRCloneRemoteDto): Promise { await validateObject(DeleteRCloneRemoteDto, input); this.logger.log(`Deleting remote: ${input.name}`); return this.callRcloneApi('config/delete', { name: input.name }); } - /** - * Start a backup operation using sync/copy - * This copies a directory from source to destination - */ async startBackup(input: RCloneStartBackupInput): Promise { await validateObject(RCloneStartBackupInput, input); - this.logger.log(`Starting backup from ${input.srcPath} to ${input.dstPath}`); + this.logger.log( + `Starting backup from ${input.srcPath} to ${input.dstPath} with group: ${input.group}` + ); const params = { srcFs: input.srcPath, dstFs: input.dstPath, + ...(input.async && { _async: input.async }), + ...(input.group && { _group: input.group }), ...(input.options || {}), }; - return this.callRcloneApi('sync/copy', params); + + const result = await this.callRcloneApi('sync/copy', params); + + this.logger.log( + `Backup job created with ID: ${result.jobid || result.jobId || 'unknown'}, group: ${input.group}` + ); + + return result; } - /** - * Get the status of a running job - */ - async getJobStatus(input: GetRCloneJobStatusDto): Promise { + async getJobStatus(input: GetRCloneJobStatusDto): Promise { await validateObject(GetRCloneJobStatusDto, input); - return this.callRcloneApi('job/status', { jobid: input.jobId }); + + const result = await 
this.callRcloneApi('job/status', { jobid: input.jobId }); + + if (result.error) { + this.logger.warn(`Job ${input.jobId} has error: ${result.error}`); + } + + if (!result.stats && result.group) { + try { + const groupStats = await this.getGroupStats(result.group); + if (groupStats && typeof groupStats === 'object') { + result.stats = { ...groupStats }; + } + } catch (groupError) { + this.logger.warn(`Failed to get group stats for job ${input.jobId}: ${groupError}`); + } + } + + if (result.stats) { + result.stats = this.enhanceStatsWithFormattedFields(result.stats); + } + + return result; } - /** - * List all running jobs - */ - async listRunningJobs(): Promise { - return this.callRcloneApi('job/list'); + async listRunningJobs(): Promise { + const result = await this.callRcloneApi('job/list'); + return result; + } + + async getGroupStats(group: string): Promise { + const result = await this.callRcloneApi('core/stats', { group }); + return result; + } + + async getBackupJobsWithStats(): Promise { + const jobList = await this.listRunningJobs(); + + if (!jobList.jobids || jobList.jobids.length === 0) { + this.logger.log('No active jobs found in RClone'); + return { jobids: [], stats: [] }; + } + + this.logger.log( + `Found ${jobList.jobids.length} active jobs in RClone, processing all jobs with stats` + ); + + const allJobs: RCloneJobWithStats[] = []; + let successfulJobQueries = 0; + + for (const jobId of jobList.jobids) { + try { + const jobStatus = await this.getJobStatus({ jobId: String(jobId) }); + const group = jobStatus.group || ''; + + let detailedStats = {}; + if (group) { + try { + const groupStats = await this.getGroupStats(group); + if (groupStats && typeof groupStats === 'object') { + detailedStats = { ...groupStats }; + } + } catch (groupError) { + this.logger.warn( + `Failed to get core/stats for job ${jobId}, group ${group}: ${groupError}` + ); + } + } + + const enhancedStats = { + ...jobStatus.stats, + ...detailedStats, + }; + + const finalStats = this.enhanceStatsWithFormattedFields(enhancedStats); + + allJobs.push({ + jobId, + stats: finalStats, + }); + + successfulJobQueries++; + } catch (error) { + this.logger.error(`Failed to get status for job ${jobId}: ${error}`); + } + } + + this.logger.log( + `Successfully queried ${successfulJobQueries} jobs from ${jobList.jobids.length} total jobs` + ); + + const result: RCloneJobsWithStatsResponse = { + jobids: allJobs.map((job) => job.jobId), + stats: allJobs.map((job) => job.stats), + }; + + return result; + } + + async getAllJobsWithStats(): Promise { + const jobList = await this.listRunningJobs(); + + if (!jobList.jobids || jobList.jobids.length === 0) { + this.logger.log('No active jobs found in RClone'); + return { jobids: [], stats: [] }; + } + + this.logger.log( + `Found ${jobList.jobids.length} active jobs in RClone: [${jobList.jobids.join(', ')}]` + ); + + const allJobs: RCloneJobWithStats[] = []; + let successfulJobQueries = 0; + + for (const jobId of jobList.jobids) { + try { + const jobStatus = await this.getJobStatus({ jobId: String(jobId) }); + const group = jobStatus.group || ''; + + let detailedStats = {}; + if (group) { + try { + const groupStats = await this.getGroupStats(group); + if (groupStats && typeof groupStats === 'object') { + detailedStats = { ...groupStats }; + } + } catch (groupError) { + this.logger.warn( + `Failed to get core/stats for job ${jobId}, group ${group}: ${groupError}` + ); + } + } + + const enhancedStats = { + ...jobStatus.stats, + ...detailedStats, + }; + + const finalStats = 
this.enhanceStatsWithFormattedFields(enhancedStats); + + allJobs.push({ + jobId, + stats: finalStats, + }); + + successfulJobQueries++; + } catch (error) { + this.logger.error(`Failed to get status for job ${jobId}: ${error}`); + } + } + + this.logger.log( + `Successfully queried ${successfulJobQueries}/${jobList.jobids.length} jobs for detailed stats` + ); + + const result: RCloneJobsWithStatsResponse = { + jobids: allJobs.map((job) => job.jobId), + stats: allJobs.map((job) => job.stats), + }; + + return result; + } + + private enhanceStatsWithFormattedFields(stats: RCloneJobStats): RCloneJobStats { + const enhancedStats = { ...stats }; + + if (stats.bytes !== undefined && stats.bytes !== null) { + enhancedStats.formattedBytes = this.formatService.formatBytes(stats.bytes); + } + + if (stats.speed !== undefined && stats.speed !== null && stats.speed > 0) { + enhancedStats.formattedSpeed = this.formatService.formatBytes(stats.speed); + } + + if (stats.elapsedTime !== undefined && stats.elapsedTime !== null) { + enhancedStats.formattedElapsedTime = this.formatService.formatDuration(stats.elapsedTime); + } + + if (stats.eta !== undefined && stats.eta !== null && stats.eta > 0) { + enhancedStats.formattedEta = this.formatService.formatDuration(stats.eta); + } + + return enhancedStats; } - /** - * Generic method to call the RClone RC API - */ private async callRcloneApi(endpoint: string, params: Record = {}): Promise { const url = `${this.rcloneBaseUrl}/${endpoint}`; try { - this.logger.debug( - `Calling RClone API: ${url} with params: ${JSON.stringify(sanitizeParams(params))}` - ); - const response = await got.post(url, { json: params, responseType: 'json', diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts index 97cc7d04f..55556e636 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts @@ -4,6 +4,7 @@ import { type Layout } from '@jsonforms/core'; import { IsBoolean, IsObject, IsOptional, IsString } from 'class-validator'; import { GraphQLJSON } from 'graphql-scalars'; +import { PrefixedID } from '@app/unraid-api/graph/scalars/graphql-type-prefixed-id.js'; import { DataSlice } from '@app/unraid-api/types/json-forms.js'; @ObjectType() @@ -147,6 +148,16 @@ export class RCloneStartBackupInput { @IsString() dstPath!: string; + @Field(() => Boolean, { nullable: true, defaultValue: false }) + @IsOptional() + @IsBoolean() + async?: boolean; + + @Field(() => String, { nullable: true }) + @IsOptional() + @IsString() + group?: string; + @Field(() => GraphQLJSON, { nullable: true }) @IsOptional() @IsObject() @@ -206,3 +217,145 @@ export class GetRCloneJobStatusDto { @IsString() jobId!: string; } + +@ObjectType() +export class RCloneJobStats { + @Field(() => Number, { description: 'Bytes transferred', nullable: true }) + bytes?: number; + + @Field(() => Number, { description: 'Transfer speed in bytes/sec', nullable: true }) + speed?: number; + + @Field(() => Number, { description: 'Estimated time to completion in seconds', nullable: true }) + eta?: number; + + @Field(() => Number, { description: 'Elapsed time in seconds', nullable: true }) + elapsedTime?: number; + + @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true }) + percentage?: number; + + @Field(() => Number, { description: 'Number of checks completed', nullable: true }) + checks?: number; + + @Field(() => Number, { description: 'Number of deletes 
completed', nullable: true }) + deletes?: number; + + @Field(() => Number, { description: 'Number of errors encountered', nullable: true }) + errors?: number; + + @Field(() => Boolean, { description: 'Whether a fatal error occurred', nullable: true }) + fatalError?: boolean; + + @Field(() => String, { description: 'Last error message', nullable: true }) + lastError?: string; + + @Field(() => Number, { description: 'Number of renames completed', nullable: true }) + renames?: number; + + @Field(() => Boolean, { description: 'Whether there is a retry error', nullable: true }) + retryError?: boolean; + + @Field(() => Number, { description: 'Number of server-side copies', nullable: true }) + serverSideCopies?: number; + + @Field(() => Number, { description: 'Bytes in server-side copies', nullable: true }) + serverSideCopyBytes?: number; + + @Field(() => Number, { description: 'Number of server-side moves', nullable: true }) + serverSideMoves?: number; + + @Field(() => Number, { description: 'Bytes in server-side moves', nullable: true }) + serverSideMoveBytes?: number; + + @Field(() => Number, { description: 'Total bytes to transfer', nullable: true }) + totalBytes?: number; + + @Field(() => Number, { description: 'Total checks to perform', nullable: true }) + totalChecks?: number; + + @Field(() => Number, { description: 'Total transfers to perform', nullable: true }) + totalTransfers?: number; + + @Field(() => Number, { description: 'Time spent transferring in seconds', nullable: true }) + transferTime?: number; + + @Field(() => Number, { description: 'Number of transfers completed', nullable: true }) + transfers?: number; + + @Field(() => GraphQLJSON, { description: 'Currently transferring files', nullable: true }) + transferring?: any[]; + + @Field(() => GraphQLJSON, { description: 'Currently checking files', nullable: true }) + checking?: any[]; + + // Formatted fields + @Field(() => String, { description: 'Human-readable bytes transferred', nullable: true }) + formattedBytes?: string; + + @Field(() => String, { description: 'Human-readable transfer speed', nullable: true }) + formattedSpeed?: string; + + @Field(() => String, { description: 'Human-readable elapsed time', nullable: true }) + formattedElapsedTime?: string; + + @Field(() => String, { description: 'Human-readable ETA', nullable: true }) + formattedEta?: string; + + // Allow additional fields + [key: string]: any; +} + +@ObjectType() +export class RCloneJob { + @Field(() => PrefixedID, { description: 'Job ID' }) + id!: string; + + @Field(() => String, { description: 'RClone group for the job', nullable: true }) + group?: string; + + @Field(() => RCloneJobStats, { description: 'Job status and statistics', nullable: true }) + stats?: RCloneJobStats; + + @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true }) + progressPercentage?: number; + + @Field(() => PrefixedID, { description: 'Configuration ID that triggered this job', nullable: true }) + configId?: string; + + @Field(() => String, { description: 'Detailed status of the job', nullable: true }) + detailedStatus?: string; + + @Field(() => Boolean, { description: 'Whether the job is finished', nullable: true }) + finished?: boolean; + + @Field(() => Boolean, { description: 'Whether the job was successful', nullable: true }) + success?: boolean; + + @Field(() => String, { description: 'Error message if job failed', nullable: true }) + error?: string; +} + +// API Response Types (for internal use) +export interface RCloneJobListResponse { + jobids: 
(string | number)[]; +} + +export interface RCloneJobStatusResponse { + group?: string; + finished?: boolean; + success?: boolean; + error?: string; + stats?: RCloneJobStats; + [key: string]: any; +} + +export interface RCloneJobWithStats { + jobId: string | number; + stats: RCloneJobStats; +} + +export interface RCloneJobsWithStatsResponse { + jobids: (string | number)[]; + stats: RCloneJobStats[]; +} diff --git a/unraid-ui/src/components.ts b/unraid-ui/src/components.ts index 97d6da9d7..20dcd7ec9 100644 --- a/unraid-ui/src/components.ts +++ b/unraid-ui/src/components.ts @@ -7,7 +7,6 @@ export * from '@/components/common/loading'; export * from '@/components/form/input'; export * from '@/components/form/label'; export * from '@/components/form/number'; -export * from '@/components/form/lightswitch'; export * from '@/components/form/select'; export * from '@/components/form/switch'; export * from '@/components/common/scroll-area'; diff --git a/unraid-ui/src/components/form/lightswitch/Lightswitch.vue b/unraid-ui/src/components/form/lightswitch/Lightswitch.vue deleted file mode 100644 index 2d1e61ecb..000000000 --- a/unraid-ui/src/components/form/lightswitch/Lightswitch.vue +++ /dev/null @@ -1,67 +0,0 @@ - - diff --git a/unraid-ui/src/components/form/lightswitch/index.ts b/unraid-ui/src/components/form/lightswitch/index.ts deleted file mode 100644 index 3a563a1ff..000000000 --- a/unraid-ui/src/components/form/lightswitch/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default as Lightswitch } from './Lightswitch.vue'; diff --git a/web/components/Backup/BackupJobConfig.vue b/web/components/Backup/BackupJobConfig.vue index 3e3c7b25d..1b00ab2dc 100644 --- a/web/components/Backup/BackupJobConfig.vue +++ b/web/components/Backup/BackupJobConfig.vue @@ -1,23 +1,178 @@ + + - - \ No newline at end of file + diff --git a/web/components/Backup/BackupOverview.vue b/web/components/Backup/BackupOverview.vue index 93682d102..f646673bb 100644 --- a/web/components/Backup/BackupOverview.vue +++ b/web/components/Backup/BackupOverview.vue @@ -1,9 +1,24 @@ + + - - diff --git a/web/components/Backup/backup-jobs.query.ts b/web/components/Backup/backup-jobs.query.ts index b1b194670..5dd0991ab 100644 --- a/web/components/Backup/backup-jobs.query.ts +++ b/web/components/Backup/backup-jobs.query.ts @@ -1,28 +1,65 @@ import { graphql } from '~/composables/gql/gql'; + +export const BACKUP_STATS_FRAGMENT = graphql(/* GraphQL */ ` + fragment BackupStats on BackupJobStats { + bytes + speed + eta + elapsedTime + percentage + checks + deletes + errors + fatalError + lastError + renames + retryError + serverSideCopies + serverSideCopyBytes + serverSideMoves + serverSideMoveBytes + totalBytes + totalChecks + totalTransfers + transferTime + transfers + transferring + checking + formattedBytes + formattedSpeed + formattedElapsedTime + formattedEta + group + finished + success + error + } +`); + export const BACKUP_JOBS_QUERY = graphql(/* GraphQL */ ` - query BackupJobs { + query BackupJobs($showSystemJobs: Boolean) { backup { id - jobs { + jobs(showSystemJobs: $showSystemJobs) { id - type - stats - formattedBytes - formattedSpeed - formattedElapsedTime - formattedEta + group + stats { + ...BackupStats + } } } } `); export const BACKUP_JOB_QUERY = graphql(/* GraphQL */ ` - query BackupJob($jobId: String!) { + query BackupJob($jobId: PrefixedID!) 
{ backupJob(jobId: $jobId) { id - type - stats + group + stats { + ...BackupStats + } } } `); @@ -60,16 +97,100 @@ export const BACKUP_JOB_CONFIG_FORM_QUERY = graphql(/* GraphQL */ ` export const CREATE_BACKUP_JOB_CONFIG_MUTATION = graphql(/* GraphQL */ ` mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) { - createBackupJobConfig(input: $input) { - id - name - sourcePath - remoteName - destinationPath - schedule - enabled - createdAt - updatedAt + backup { + createBackupJobConfig(input: $input) { + id + name + sourcePath + remoteName + destinationPath + schedule + enabled + createdAt + updatedAt + } } } `); + +export const UPDATE_BACKUP_JOB_CONFIG_MUTATION = graphql(/* GraphQL */ ` + mutation UpdateBackupJobConfig($id: String!, $input: UpdateBackupJobConfigInput!) { + backup { + updateBackupJobConfig(id: $id, input: $input) { + id + name + sourcePath + remoteName + destinationPath + schedule + enabled + createdAt + updatedAt + lastRunAt + lastRunStatus + } + } + } +`); + +export const DELETE_BACKUP_JOB_CONFIG_MUTATION = graphql(/* GraphQL */ ` + mutation DeleteBackupJobConfig($id: String!) { + backup { + deleteBackupJobConfig(id: $id) + } + } +`); + +export const TOGGLE_BACKUP_JOB_CONFIG_MUTATION = graphql(/* GraphQL */ ` + mutation ToggleBackupJobConfig($id: String!) { + backup { + toggleJobConfig(id: $id) { + id + name + sourcePath + remoteName + destinationPath + schedule + enabled + createdAt + updatedAt + lastRunAt + lastRunStatus + } + } + } +`); + +export const TRIGGER_BACKUP_JOB_MUTATION = graphql(/* GraphQL */ ` + mutation TriggerBackupJob($id: PrefixedID!) { + backup { + triggerJob(id: $id) { + status + jobId + } + } + } +`); + +export const INITIATE_BACKUP_MUTATION = graphql(/* GraphQL */ ` + mutation InitiateBackup($input: InitiateBackupInput!) { + backup { + initiateBackup(input: $input) { + status + jobId + } + } + } +`); + +export const BACKUP_JOB_PROGRESS_SUBSCRIPTION = graphql(/* GraphQL */ ` + subscription BackupJobProgress($jobId: PrefixedID!) { + backupJobProgress(jobId: $jobId) { + id + type + stats { + ...BackupStats + } + } + } +`); \ No newline at end of file diff --git a/web/composables/gql/gql.ts b/web/composables/gql/gql.ts index 17bb7b6f0..e796968af 100644 --- a/web/composables/gql/gql.ts +++ b/web/composables/gql/gql.ts @@ -20,11 +20,17 @@ type Documents = { "\n mutation CreateApiKey($input: CreateApiKeyInput!) {\n apiKey {\n create(input: $input) {\n id\n key\n name\n description\n createdAt\n roles\n permissions {\n resource\n actions\n }\n }\n }\n }\n": typeof types.CreateApiKeyDocument, "\n mutation DeleteApiKey($input: DeleteApiKeyInput!) {\n apiKey {\n delete(input: $input)\n }\n }\n": typeof types.DeleteApiKeyDocument, "\n query ApiKeyMeta {\n apiKeyPossibleRoles\n apiKeyPossiblePermissions {\n resource\n actions\n }\n }\n": typeof types.ApiKeyMetaDocument, - "\n query BackupJobs {\n backup {\n id\n jobs {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n": typeof types.BackupJobsDocument, - "\n query BackupJob($jobId: String!) {\n backupJob(jobId: $jobId) {\n id\n type\n stats\n }\n }\n": typeof types.BackupJobDocument, + "\n query BackupJobs($showSystemJobs: Boolean) {\n backup {\n id\n jobs(showSystemJobs: $showSystemJobs) {\n id\n group\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n": typeof types.BackupJobsDocument, + "\n query BackupJob($jobId: PrefixedID!) 
{\n backupJob(jobId: $jobId) {\n id\n group\n stats\n }\n }\n": typeof types.BackupJobDocument, "\n query BackupJobConfigs {\n backup {\n id\n configs {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": typeof types.BackupJobConfigsDocument, "\n query BackupJobConfigForm($input: BackupJobConfigFormInput) {\n backupJobConfigForm(input: $input) {\n id\n dataSchema\n uiSchema\n }\n }\n": typeof types.BackupJobConfigFormDocument, - "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n": typeof types.CreateBackupJobConfigDocument, + "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n backup {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n }\n": typeof types.CreateBackupJobConfigDocument, + "\n mutation UpdateBackupJobConfig($id: String!, $input: UpdateBackupJobConfigInput!) {\n backup {\n updateBackupJobConfig(id: $id, input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": typeof types.UpdateBackupJobConfigDocument, + "\n mutation DeleteBackupJobConfig($id: String!) {\n backup {\n deleteBackupJobConfig(id: $id)\n }\n }\n": typeof types.DeleteBackupJobConfigDocument, + "\n mutation ToggleBackupJobConfig($id: String!) {\n backup {\n toggleJobConfig(id: $id) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": typeof types.ToggleBackupJobConfigDocument, + "\n mutation TriggerBackupJob($id: PrefixedID!) {\n backup {\n triggerJob(id: $id) {\n status\n jobId\n }\n }\n }\n": typeof types.TriggerBackupJobDocument, + "\n mutation InitiateBackup($input: InitiateBackupInput!) {\n backup {\n initiateBackup(input: $input) {\n status\n jobId\n }\n }\n }\n": typeof types.InitiateBackupDocument, + "\n subscription BackupJobProgress($jobId: PrefixedID!) {\n backupJobProgress(jobId: $jobId) {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n": typeof types.BackupJobProgressDocument, "\n query GetConnectSettingsForm {\n connect {\n id\n settings {\n id\n dataSchema\n uiSchema\n values {\n sandbox\n extraOrigins\n accessType\n forwardType\n port\n ssoUserIds\n }\n }\n }\n }\n": typeof types.GetConnectSettingsFormDocument, "\n mutation UpdateConnectSettings($input: ApiSettingsInput!) {\n updateApiSettings(input: $input) {\n sandbox\n extraOrigins\n accessType\n forwardType\n port\n ssoUserIds\n }\n }\n": typeof types.UpdateConnectSettingsDocument, "\n query LogFiles {\n logFiles {\n name\n path\n size\n modifiedAt\n }\n }\n": typeof types.LogFilesDocument, @@ -62,11 +68,17 @@ const documents: Documents = { "\n mutation CreateApiKey($input: CreateApiKeyInput!) {\n apiKey {\n create(input: $input) {\n id\n key\n name\n description\n createdAt\n roles\n permissions {\n resource\n actions\n }\n }\n }\n }\n": types.CreateApiKeyDocument, "\n mutation DeleteApiKey($input: DeleteApiKeyInput!) 
{\n apiKey {\n delete(input: $input)\n }\n }\n": types.DeleteApiKeyDocument, "\n query ApiKeyMeta {\n apiKeyPossibleRoles\n apiKeyPossiblePermissions {\n resource\n actions\n }\n }\n": types.ApiKeyMetaDocument, - "\n query BackupJobs {\n backup {\n id\n jobs {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n": types.BackupJobsDocument, - "\n query BackupJob($jobId: String!) {\n backupJob(jobId: $jobId) {\n id\n type\n stats\n }\n }\n": types.BackupJobDocument, + "\n query BackupJobs($showSystemJobs: Boolean) {\n backup {\n id\n jobs(showSystemJobs: $showSystemJobs) {\n id\n group\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n": types.BackupJobsDocument, + "\n query BackupJob($jobId: PrefixedID!) {\n backupJob(jobId: $jobId) {\n id\n group\n stats\n }\n }\n": types.BackupJobDocument, "\n query BackupJobConfigs {\n backup {\n id\n configs {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": types.BackupJobConfigsDocument, "\n query BackupJobConfigForm($input: BackupJobConfigFormInput) {\n backupJobConfigForm(input: $input) {\n id\n dataSchema\n uiSchema\n }\n }\n": types.BackupJobConfigFormDocument, - "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n": types.CreateBackupJobConfigDocument, + "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n backup {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n }\n": types.CreateBackupJobConfigDocument, + "\n mutation UpdateBackupJobConfig($id: String!, $input: UpdateBackupJobConfigInput!) {\n backup {\n updateBackupJobConfig(id: $id, input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": types.UpdateBackupJobConfigDocument, + "\n mutation DeleteBackupJobConfig($id: String!) {\n backup {\n deleteBackupJobConfig(id: $id)\n }\n }\n": types.DeleteBackupJobConfigDocument, + "\n mutation ToggleBackupJobConfig($id: String!) {\n backup {\n toggleJobConfig(id: $id) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n": types.ToggleBackupJobConfigDocument, + "\n mutation TriggerBackupJob($id: PrefixedID!) {\n backup {\n triggerJob(id: $id) {\n status\n jobId\n }\n }\n }\n": types.TriggerBackupJobDocument, + "\n mutation InitiateBackup($input: InitiateBackupInput!) {\n backup {\n initiateBackup(input: $input) {\n status\n jobId\n }\n }\n }\n": types.InitiateBackupDocument, + "\n subscription BackupJobProgress($jobId: PrefixedID!) {\n backupJobProgress(jobId: $jobId) {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n": types.BackupJobProgressDocument, "\n query GetConnectSettingsForm {\n connect {\n id\n settings {\n id\n dataSchema\n uiSchema\n values {\n sandbox\n extraOrigins\n accessType\n forwardType\n port\n ssoUserIds\n }\n }\n }\n }\n": types.GetConnectSettingsFormDocument, "\n mutation UpdateConnectSettings($input: ApiSettingsInput!) 
{\n updateApiSettings(input: $input) {\n sandbox\n extraOrigins\n accessType\n forwardType\n port\n ssoUserIds\n }\n }\n": types.UpdateConnectSettingsDocument, "\n query LogFiles {\n logFiles {\n name\n path\n size\n modifiedAt\n }\n }\n": types.LogFilesDocument, @@ -139,11 +151,11 @@ export function graphql(source: "\n query ApiKeyMeta {\n apiKeyPossibleRoles /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. */ -export function graphql(source: "\n query BackupJobs {\n backup {\n id\n jobs {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n"): (typeof documents)["\n query BackupJobs {\n backup {\n id\n jobs {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n"]; +export function graphql(source: "\n query BackupJobs($showSystemJobs: Boolean) {\n backup {\n id\n jobs(showSystemJobs: $showSystemJobs) {\n id\n group\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n"): (typeof documents)["\n query BackupJobs($showSystemJobs: Boolean) {\n backup {\n id\n jobs(showSystemJobs: $showSystemJobs) {\n id\n group\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n }\n"]; /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. */ -export function graphql(source: "\n query BackupJob($jobId: String!) {\n backupJob(jobId: $jobId) {\n id\n type\n stats\n }\n }\n"): (typeof documents)["\n query BackupJob($jobId: String!) {\n backupJob(jobId: $jobId) {\n id\n type\n stats\n }\n }\n"]; +export function graphql(source: "\n query BackupJob($jobId: PrefixedID!) {\n backupJob(jobId: $jobId) {\n id\n group\n stats\n }\n }\n"): (typeof documents)["\n query BackupJob($jobId: PrefixedID!) {\n backupJob(jobId: $jobId) {\n id\n group\n stats\n }\n }\n"]; /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. */ @@ -155,7 +167,31 @@ export function graphql(source: "\n query BackupJobConfigForm($input: BackupJob /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. */ -export function graphql(source: "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n"): (typeof documents)["\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n"]; +export function graphql(source: "\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n backup {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n }\n"): (typeof documents)["\n mutation CreateBackupJobConfig($input: CreateBackupJobConfigInput!) {\n backup {\n createBackupJobConfig(input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n }\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. 
+ */ +export function graphql(source: "\n mutation UpdateBackupJobConfig($id: String!, $input: UpdateBackupJobConfigInput!) {\n backup {\n updateBackupJobConfig(id: $id, input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n"): (typeof documents)["\n mutation UpdateBackupJobConfig($id: String!, $input: UpdateBackupJobConfigInput!) {\n backup {\n updateBackupJobConfig(id: $id, input: $input) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. + */ +export function graphql(source: "\n mutation DeleteBackupJobConfig($id: String!) {\n backup {\n deleteBackupJobConfig(id: $id)\n }\n }\n"): (typeof documents)["\n mutation DeleteBackupJobConfig($id: String!) {\n backup {\n deleteBackupJobConfig(id: $id)\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. + */ +export function graphql(source: "\n mutation ToggleBackupJobConfig($id: String!) {\n backup {\n toggleJobConfig(id: $id) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n"): (typeof documents)["\n mutation ToggleBackupJobConfig($id: String!) {\n backup {\n toggleJobConfig(id: $id) {\n id\n name\n sourcePath\n remoteName\n destinationPath\n schedule\n enabled\n createdAt\n updatedAt\n lastRunAt\n lastRunStatus\n }\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. + */ +export function graphql(source: "\n mutation TriggerBackupJob($id: PrefixedID!) {\n backup {\n triggerJob(id: $id) {\n status\n jobId\n }\n }\n }\n"): (typeof documents)["\n mutation TriggerBackupJob($id: PrefixedID!) {\n backup {\n triggerJob(id: $id) {\n status\n jobId\n }\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. + */ +export function graphql(source: "\n mutation InitiateBackup($input: InitiateBackupInput!) {\n backup {\n initiateBackup(input: $input) {\n status\n jobId\n }\n }\n }\n"): (typeof documents)["\n mutation InitiateBackup($input: InitiateBackupInput!) {\n backup {\n initiateBackup(input: $input) {\n status\n jobId\n }\n }\n }\n"]; +/** + * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. + */ +export function graphql(source: "\n subscription BackupJobProgress($jobId: PrefixedID!) {\n backupJobProgress(jobId: $jobId) {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n"): (typeof documents)["\n subscription BackupJobProgress($jobId: PrefixedID!) {\n backupJobProgress(jobId: $jobId) {\n id\n type\n stats\n formattedBytes\n formattedSpeed\n formattedElapsedTime\n formattedEta\n }\n }\n"]; /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. 
 */
diff --git a/web/composables/gql/graphql.ts b/web/composables/gql/graphql.ts
index 953688906..9150fe5a7 100644
--- a/web/composables/gql/graphql.ts
+++ b/web/composables/gql/graphql.ts
@@ -382,8 +382,17 @@ export type Backup = Node & {
   status: BackupStatus;
 };
 
+
+export type BackupJobsArgs = {
+  showSystemJobs?: InputMaybe<Scalars['Boolean']['input']>;
+};
+
 export type BackupJob = {
   __typename?: 'BackupJob';
+  /** Configuration ID that triggered this job */
+  configId?: Maybe<Scalars['String']['output']>;
+  /** Detailed status of the job */
+  detailedStatus?: Maybe<Scalars['String']['output']>;
   /** Formatted bytes transferred */
   formattedBytes?: Maybe<Scalars['String']['output']>;
   /** Formatted elapsed time */
@@ -392,15 +401,19 @@ export type BackupJob = {
   formattedEta?: Maybe<Scalars['String']['output']>;
   /** Formatted transfer speed */
   formattedSpeed?: Maybe<Scalars['String']['output']>;
+  /** RClone group for the job */
+  group?: Maybe<Scalars['String']['output']>;
   /** Job ID */
-  id: Scalars['String']['output'];
+  id: Scalars['PrefixedID']['output'];
+  /** Progress percentage (0-100) */
+  progressPercentage?: Maybe<Scalars['Float']['output']>;
   /** Job status and statistics */
   stats: Scalars['JSON']['output'];
   /** Job type (e.g., sync/copy) */
   type: Scalars['String']['output'];
 };
 
-export type BackupJobConfig = {
+export type BackupJobConfig = Node & {
   __typename?: 'BackupJobConfig';
   /** When this config was created */
   createdAt: Scalars['DateTime']['output'];
@@ -430,7 +443,7 @@ export type BackupJobConfig = {
 export type BackupJobConfigForm = {
   __typename?: 'BackupJobConfigForm';
   dataSchema: Scalars['JSON']['output'];
-  id: Scalars['ID']['output'];
+  id: Scalars['PrefixedID']['output'];
   uiSchema: Scalars['JSON']['output'];
 };
 
@@ -438,6 +451,60 @@ export type BackupJobConfigFormInput = {
   showAdvanced?: Scalars['Boolean']['input'];
 };
 
+/** Backup related mutations */
+export type BackupMutations = {
+  __typename?: 'BackupMutations';
+  /** Create a new backup job configuration */
+  createBackupJobConfig: BackupJobConfig;
+  /** Delete a backup job configuration */
+  deleteBackupJobConfig: Scalars['Boolean']['output'];
+  /** Initiates a backup using a configured remote. */
+  initiateBackup: BackupStatus;
+  /** Toggle a backup job configuration enabled/disabled */
+  toggleJobConfig?: Maybe<BackupJobConfig>;
+  /** Manually trigger a backup job using existing configuration */
+  triggerJob: BackupStatus;
+  /** Update a backup job configuration */
+  updateBackupJobConfig?: Maybe<BackupJobConfig>;
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsCreateBackupJobConfigArgs = {
+  input: CreateBackupJobConfigInput;
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsDeleteBackupJobConfigArgs = {
+  id: Scalars['String']['input'];
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsInitiateBackupArgs = {
+  input: InitiateBackupInput;
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsToggleJobConfigArgs = {
+  id: Scalars['String']['input'];
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsTriggerJobArgs = {
+  id: Scalars['PrefixedID']['input'];
+};
+
+
+/** Backup related mutations */
+export type BackupMutationsUpdateBackupJobConfigArgs = {
+  id: Scalars['String']['input'];
+  input: UpdateBackupJobConfigInput;
+};
+
 export type BackupStatus = {
   __typename?: 'BackupStatus';
   /** Job ID if available, can be used to check job status. */
@@ -999,21 +1066,16 @@ export type Mutation = {
   archiveNotification: Notification;
   archiveNotifications: NotificationOverview;
   array: ArrayMutations;
+  backup: BackupMutations;
   connectSignIn: Scalars['Boolean']['output'];
   connectSignOut: Scalars['Boolean']['output'];
-  /** Create a new backup job configuration */
-  createBackupJobConfig: BackupJobConfig;
   /** Creates a new notification record */
   createNotification: Notification;
   /** Deletes all archived notifications on server. */
   deleteArchivedNotifications: NotificationOverview;
-  /** Delete a backup job configuration */
-  deleteBackupJobConfig: Scalars['Boolean']['output'];
   deleteNotification: NotificationOverview;
   docker: DockerMutations;
   enableDynamicRemoteAccess: Scalars['Boolean']['output'];
-  /** Initiates a backup using a configured remote. */
-  initiateBackup: BackupStatus;
   parityCheck: ParityCheckMutations;
   rclone: RCloneMutations;
   /** Reads each notification to recompute & update the overview. */
@@ -1026,8 +1088,6 @@
   /** Marks a notification as unread. */
   unreadNotification: Notification;
   updateApiSettings: ConnectSettingsValues;
-  /** Update a backup job configuration */
-  updateBackupJobConfig?: Maybe<BackupJobConfig>;
   vm: VmMutations;
 };
 
@@ -1052,21 +1112,11 @@ export type MutationConnectSignInArgs = {
 };
 
 
-export type MutationCreateBackupJobConfigArgs = {
-  input: CreateBackupJobConfigInput;
-};
-
-
 export type MutationCreateNotificationArgs = {
   input: NotificationData;
 };
 
 
-export type MutationDeleteBackupJobConfigArgs = {
-  id: Scalars['String']['input'];
-};
-
-
 export type MutationDeleteNotificationArgs = {
   id: Scalars['PrefixedID']['input'];
   type: NotificationType;
@@ -1078,11 +1128,6 @@ export type MutationEnableDynamicRemoteAccessArgs = {
 };
 
 
-export type MutationInitiateBackupArgs = {
-  input: InitiateBackupInput;
-};
-
-
 export type MutationSetAdditionalAllowedOriginsArgs = {
   input: AllowedOriginInput;
 };
@@ -1112,12 +1157,6 @@ export type MutationUpdateApiSettingsArgs = {
   input: ApiSettingsInput;
 };
 
-
-export type MutationUpdateBackupJobConfigArgs = {
-  id: Scalars['String']['input'];
-  input: UpdateBackupJobConfigInput;
-};
-
 export type Network = Node & {
   __typename?: 'Network';
   accessUrls?: Maybe<Array<AccessUrl>>;
@@ -1358,7 +1397,7 @@ export type QueryApiKeyArgs = {
 
 
 export type QueryBackupJobArgs = {
-  jobId: Scalars['String']['input'];
+  jobId: Scalars['PrefixedID']['input'];
 };
 
 
@@ -1512,6 +1551,7 @@ export enum Resource {
   ACTIVATION_CODE = 'ACTIVATION_CODE',
   API_KEY = 'API_KEY',
   ARRAY = 'ARRAY',
+  BACKUP = 'BACKUP',
   CLOUD = 'CLOUD',
   CONFIG = 'CONFIG',
   CONNECT = 'CONNECT',
@@ -1623,6 +1663,8 @@ export type Share = Node & {
 export type Subscription = {
   __typename?: 'Subscription';
   arraySubscription: UnraidArray;
+  /** Subscribe to real-time backup job progress updates */
+  backupJobProgress?: Maybe<BackupJob>;
   displaySubscription: Display;
   infoSubscription: Info;
   logFile: LogFileContent;
@@ -1635,6 +1677,11 @@
 };
 
 
+export type SubscriptionBackupJobProgressArgs = {
+  jobId: Scalars['PrefixedID']['input'];
+};
+
+
 export type SubscriptionLogFileArgs = {
   path: Scalars['String']['input'];
 };
@@ -2080,17 +2127,19 @@ export type ApiKeyMetaQueryVariables = Exact<{ [key: string]: never; }>;
 
 
 export type ApiKeyMetaQuery = { __typename?: 'Query', apiKeyPossibleRoles: Array<Role>, apiKeyPossiblePermissions: Array<{ __typename?: 'Permission', resource: Resource, actions: Array<string> }> };
 
-export type BackupJobsQueryVariables = Exact<{ [key: string]: never; }>;
-
-
-export type BackupJobsQuery = { __typename?: 'Query', backup: { __typename?: 'Backup', id: string, jobs: Array<{ __typename?: 'BackupJob', id: string, type: string, stats: any, formattedBytes?: string | null, formattedSpeed?: string | null, formattedElapsedTime?: string | null, formattedEta?: string | null }> } };
-
-export type BackupJobQueryVariables = Exact<{
-  jobId: Scalars['String']['input'];
+export type BackupJobsQueryVariables = Exact<{
+  showSystemJobs?: InputMaybe<Scalars['Boolean']['input']>;
 }>;
 
 
-export type BackupJobQuery = { __typename?: 'Query', backupJob?: { __typename?: 'BackupJob', id: string, type: string, stats: any } | null };
+export type BackupJobsQuery = { __typename?: 'Query', backup: { __typename?: 'Backup', id: string, jobs: Array<{ __typename?: 'BackupJob', id: string, group?: string | null, stats: any, formattedBytes?: string | null, formattedSpeed?: string | null, formattedElapsedTime?: string | null, formattedEta?: string | null }> } };
+
+export type BackupJobQueryVariables = Exact<{
+  jobId: Scalars['PrefixedID']['input'];
+}>;
+
+
+export type BackupJobQuery = { __typename?: 'Query', backupJob?: { __typename?: 'BackupJob', id: string, group?: string | null, stats: any } | null };
 
 export type BackupJobConfigsQueryVariables = Exact<{ [key: string]: never; }>;
 
@@ -2109,7 +2158,50 @@ export type CreateBackupJobConfigMutationVariables = Exact<{
 }>;
 
 
-export type CreateBackupJobConfigMutation = { __typename?: 'Mutation', createBackupJobConfig: { __typename?: 'BackupJobConfig', id: string, name: string, sourcePath: string, remoteName: string, destinationPath: string, schedule: string, enabled: boolean, createdAt: string, updatedAt: string } };
+export type CreateBackupJobConfigMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', createBackupJobConfig: { __typename?: 'BackupJobConfig', id: string, name: string, sourcePath: string, remoteName: string, destinationPath: string, schedule: string, enabled: boolean, createdAt: string, updatedAt: string } } };
+
+export type UpdateBackupJobConfigMutationVariables = Exact<{
+  id: Scalars['String']['input'];
+  input: UpdateBackupJobConfigInput;
+}>;
+
+
+export type UpdateBackupJobConfigMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', updateBackupJobConfig?: { __typename?: 'BackupJobConfig', id: string, name: string, sourcePath: string, remoteName: string, destinationPath: string, schedule: string, enabled: boolean, createdAt: string, updatedAt: string, lastRunAt?: string | null, lastRunStatus?: string | null } | null } };
+
+export type DeleteBackupJobConfigMutationVariables = Exact<{
+  id: Scalars['String']['input'];
+}>;
+
+
+export type DeleteBackupJobConfigMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', deleteBackupJobConfig: boolean } };
+
+export type ToggleBackupJobConfigMutationVariables = Exact<{
+  id: Scalars['String']['input'];
+}>;
+
+
+export type ToggleBackupJobConfigMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', toggleJobConfig?: { __typename?: 'BackupJobConfig', id: string, name: string, sourcePath: string, remoteName: string, destinationPath: string, schedule: string, enabled: boolean, createdAt: string, updatedAt: string, lastRunAt?: string | null, lastRunStatus?: string | null } | null } };
+
+export type TriggerBackupJobMutationVariables = Exact<{
+  id: Scalars['PrefixedID']['input'];
+}>;
+
+
+export type TriggerBackupJobMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', triggerJob: { __typename?: 'BackupStatus', status: string,
jobId?: string | null } } }; + +export type InitiateBackupMutationVariables = Exact<{ + input: InitiateBackupInput; +}>; + + +export type InitiateBackupMutation = { __typename?: 'Mutation', backup: { __typename?: 'BackupMutations', initiateBackup: { __typename?: 'BackupStatus', status: string, jobId?: string | null } } }; + +export type BackupJobProgressSubscriptionVariables = Exact<{ + jobId: Scalars['PrefixedID']['input']; +}>; + + +export type BackupJobProgressSubscription = { __typename?: 'Subscription', backupJobProgress?: { __typename?: 'BackupJob', id: string, type: string, stats: any, formattedBytes?: string | null, formattedSpeed?: string | null, formattedElapsedTime?: string | null, formattedEta?: string | null } | null }; export type GetConnectSettingsFormQueryVariables = Exact<{ [key: string]: never; }>; @@ -2307,11 +2399,17 @@ export const ApiKeysDocument = {"kind":"Document","definitions":[{"kind":"Operat export const CreateApiKeyDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"CreateApiKey"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"CreateApiKeyInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"apiKey"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"create"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"key"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"description"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"roles"}},{"kind":"Field","name":{"kind":"Name","value":"permissions"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"resource"}},{"kind":"Field","name":{"kind":"Name","value":"actions"}}]}}]}}]}}]}}]} as unknown as DocumentNode; export const DeleteApiKeyDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"DeleteApiKey"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"DeleteApiKeyInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"apiKey"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"delete"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}]}]}}]}}]} as unknown as DocumentNode; export const ApiKeyMetaDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ApiKeyMeta"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"apiKeyPossibleRoles"}},{"kind":"Field","name":{"kind":"Name","value":"apiKeyPossiblePermissions"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"resource"}},{"kind":"Field","name":{"kind":"Name","value":"actions"}}]}}]}}]} as unknown as DocumentNode; -export const BackupJobsDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJobs"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"jobs"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"type"}},{"kind":"Field","name":{"kind":"Name","value":"stats"}},{"kind":"Field","name":{"kind":"Name","value":"formattedBytes"}},{"kind":"Field","name":{"kind":"Name","value":"formattedSpeed"}},{"kind":"Field","name":{"kind":"Name","value":"formattedElapsedTime"}},{"kind":"Field","name":{"kind":"Name","value":"formattedEta"}}]}}]}}]}}]} as unknown as DocumentNode; -export const BackupJobDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJob"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backupJob"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"jobId"},"value":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"type"}},{"kind":"Field","name":{"kind":"Name","value":"stats"}}]}}]}}]} as unknown as DocumentNode; +export const BackupJobsDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJobs"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"showSystemJobs"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"Boolean"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"jobs"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"showSystemJobs"},"value":{"kind":"Variable","name":{"kind":"Name","value":"showSystemJobs"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"group"}},{"kind":"Field","name":{"kind":"Name","value":"stats"}},{"kind":"Field","name":{"kind":"Name","value":"formattedBytes"}},{"kind":"Field","name":{"kind":"Name","value":"formattedSpeed"}},{"kind":"Field","name":{"kind":"Name","value":"formattedElapsedTime"}},{"kind":"Field","name":{"kind":"Name","value":"formattedEta"}}]}}]}}]}}]} as unknown as DocumentNode; +export const BackupJobDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJob"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"PrefixedID"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backupJob"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"jobId"},"value":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"group"}},{"kind":"Field","name":{"kind":"Name","value":"stats"}}]}}]}}]} as unknown as DocumentNode; export const BackupJobConfigsDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJobConfigs"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"configs"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"sourcePath"}},{"kind":"Field","name":{"kind":"Name","value":"remoteName"}},{"kind":"Field","name":{"kind":"Name","value":"destinationPath"}},{"kind":"Field","name":{"kind":"Name","value":"schedule"}},{"kind":"Field","name":{"kind":"Name","value":"enabled"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"updatedAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunStatus"}}]}}]}}]}}]} as unknown as DocumentNode; export const BackupJobConfigFormDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BackupJobConfigForm"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"BackupJobConfigFormInput"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backupJobConfigForm"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"dataSchema"}},{"kind":"Field","name":{"kind":"Name","value":"uiSchema"}}]}}]}}]} as unknown as DocumentNode; -export const CreateBackupJobConfigDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"CreateBackupJobConfig"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"CreateBackupJobConfigInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"createBackupJobConfig"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"sourcePath"}},{"kind":"Field","name":{"kind":"Name","value":"remoteName"}},{"kind":"Field","name":{"kind":"Name","value":"destinationPath"}},{"kind":"Field","name":{"kind":"Name","value":"schedule"}},{"kind":"Field","name":{"kind":"Name","value":"enabled"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"updatedAt"}}]}}]}}]} as unknown as DocumentNode; +export const CreateBackupJobConfigDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"CreateBackupJobConfig"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"CreateBackupJobConfigInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"createBackupJobConfig"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"sourcePath"}},{"kind":"Field","name":{"kind":"Name","value":"remoteName"}},{"kind":"Field","name":{"kind":"Name","value":"destinationPath"}},{"kind":"Field","name":{"kind":"Name","value":"schedule"}},{"kind":"Field","name":{"kind":"Name","value":"enabled"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"updatedAt"}}]}}]}}]}}]} as unknown as DocumentNode; +export const UpdateBackupJobConfigDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"UpdateBackupJobConfig"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"UpdateBackupJobConfigInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"updateBackupJobConfig"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}},{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"sourcePath"}},{"kind":"Field","name":{"kind":"Name","value":"remoteName"}},{"kind":"Field","name":{"kind":"Name","value":"destinationPath"}},{"kind":"Field","name":{"kind":"Name","value":"schedule"}},{"kind":"Field","name":{"kind":"Name","value":"enabled"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"updatedAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunStatus"}}]}}]}}]}}]} as unknown as DocumentNode; +export const DeleteBackupJobConfigDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"DeleteBackupJobConfig"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"deleteBackupJobConfig"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}}]}]}}]}}]} as unknown as DocumentNode; +export const ToggleBackupJobConfigDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"ToggleBackupJobConfig"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"toggleJobConfig"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"sourcePath"}},{"kind":"Field","name":{"kind":"Name","value":"remoteName"}},{"kind":"Field","name":{"kind":"Name","value":"destinationPath"}},{"kind":"Field","name":{"kind":"Name","value":"schedule"}},{"kind":"Field","name":{"kind":"Name","value":"enabled"}},{"kind":"Field","name":{"kind":"Name","value":"createdAt"}},{"kind":"Field","name":{"kind":"Name","value":"updatedAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunAt"}},{"kind":"Field","name":{"kind":"Name","value":"lastRunStatus"}}]}}]}}]}}]} as unknown as DocumentNode; +export const TriggerBackupJobDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"TriggerBackupJob"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"PrefixedID"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"triggerJob"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"status"}},{"kind":"Field","name":{"kind":"Name","value":"jobId"}}]}}]}}]}}]} as unknown as DocumentNode; +export const InitiateBackupDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"InitiateBackup"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"InitiateBackupInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backup"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"initiateBackup"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"status"}},{"kind":"Field","name":{"kind":"Name","value":"jobId"}}]}}]}}]}}]} as unknown as DocumentNode; +export const BackupJobProgressDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"subscription","name":{"kind":"Name","value":"BackupJobProgress"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"PrefixedID"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"backupJobProgress"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"jobId"},"value":{"kind":"Variable","name":{"kind":"Name","value":"jobId"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"type"}},{"kind":"Field","name":{"kind":"Name","value":"stats"}},{"kind":"Field","name":{"kind":"Name","value":"formattedBytes"}},{"kind":"Field","name":{"kind":"Name","value":"formattedSpeed"}},{"kind":"Field","name":{"kind":"Name","value":"formattedElapsedTime"}},{"kind":"Field","name":{"kind":"Name","value":"formattedEta"}}]}}]}}]} as unknown as DocumentNode; export const GetConnectSettingsFormDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"GetConnectSettingsForm"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"connect"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"settings"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"dataSchema"}},{"kind":"Field","name":{"kind":"Name","value":"uiSchema"}},{"kind":"Field","name":{"kind":"Name","value":"values"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"sandbox"}},{"kind":"Field","name":{"kind":"Name","value":"extraOrigins"}},{"kind":"Field","name":{"kind":"Name","value":"accessType"}},{"kind":"Field","name":{"kind":"Name","value":"forwardType"}},{"kind":"Field","name":{"kind":"Name","value":"port"}},{"kind":"Field","name":{"kind":"Name","value":"ssoUserIds"}}]}}]}}]}}]}}]} as unknown as DocumentNode; export const UpdateConnectSettingsDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"UpdateConnectSettings"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"ApiSettingsInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"updateApiSettings"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"sandbox"}},{"kind":"Field","name":{"kind":"Name","value":"extraOrigins"}},{"kind":"Field","name":{"kind":"Name","value":"accessType"}},{"kind":"Field","name":{"kind":"Name","value":"forwardType"}},{"kind":"Field","name":{"kind":"Name","value":"port"}},{"kind":"Field","name":{"kind":"Name","value":"ssoUserIds"}}]}}]}}]} as unknown as DocumentNode; export const LogFilesDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"LogFiles"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"logFiles"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"name"}},{"kind":"Field","name":{"kind":"Name","value":"path"}},{"kind":"Field","name":{"kind":"Name","value":"size"}},{"kind":"Field","name":{"kind":"Name","value":"modifiedAt"}}]}}]}}]} as unknown as DocumentNode;