Merge pull request #122 from trycua/feature/lume/sparse-push-pull

[Lume] Sparse Push & Pull Optimizations
This commit is contained in:
f-trycua
2025-04-22 22:46:54 +02:00
committed by GitHub
14 changed files with 2987 additions and 850 deletions

233
libs/lume/.cursorignore Normal file
View File

@@ -0,0 +1,233 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
!libs/lume/scripts/build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Scripts
server/scripts/
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# Conda
.conda/
# Local environment
.env.local
# macOS DS_Store
.DS_Store
weights/
weights/icon_detect/
weights/icon_detect/model.pt
weights/icon_detect/model.pt.zip
weights/icon_detect/model.pt.zip.part*
libs/omniparser/weights/icon_detect/model.pt
# Example test data and output
examples/test_data/
examples/output/
/screenshots/
/experiments/
/logs/
# Xcode
#
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
## User settings
xcuserdata/
## Obj-C/Swift specific
*.hmap
## App packaging
*.ipa
*.dSYM.zip
*.dSYM
## Playgrounds
timeline.xctimeline
playground.xcworkspace
# Swift Package Manager
#
# Add this line if you want to avoid checking in source code from Swift Package Manager dependencies.
# Packages/
# Package.pins
# Package.resolved
# *.xcodeproj
#
# Xcode automatically generates this directory with a .xcworkspacedata file and xcuserdata
# hence it is not needed unless you have added a package configuration file to your project
.swiftpm/
.build/
# CocoaPods
#
# We recommend against adding the Pods directory to your .gitignore. However
# you should judge for yourself, the pros and cons are mentioned at:
# https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
#
# Pods/
#
# Add this line if you want to avoid checking in source code from the Xcode workspace
# *.xcworkspace
# Carthage
#
# Add this line if you want to avoid checking in source code from Carthage dependencies.
# Carthage/Checkouts
Carthage/Build/
# fastlane
#
# It is recommended to not store the screenshots in the git repo.
# Instead, use fastlane to re-generate the screenshots whenever they are needed.
# For more information about the recommended setup visit:
# https://docs.fastlane.tools/best-practices/source-control/#source-control
fastlane/report.xml
fastlane/Preview.html
fastlane/screenshots/**/*.png
fastlane/test_output
# Ignore folder
ignore
# .release
.release/

View File

@@ -52,6 +52,7 @@ Commands:
lume stop <name> Stop a running VM
lume delete <name> Delete a VM
lume pull <image> Pull a macOS image from container registry
lume push <name> <image:tag> Push a VM image to a container registry
lume clone <name> <new-name> Clone an existing VM
lume config Get or set lume configuration
lume images List available macOS images in local cache
@@ -99,6 +100,16 @@ Command Options:
--organization <org> Organization to pull from (default: trycua)
--storage <name> VM storage location to use
push:
--additional-tags <tags...> Additional tags to push the same image to
--registry <url> Container registry URL (default: ghcr.io)
--organization <org> Organization/user to push to (default: trycua)
--storage <name> VM storage location to use
--chunk-size-mb <size> Chunk size for disk image upload in MB (default: 512)
--verbose Enable verbose logging
--dry-run Prepare files and show plan without uploading
--reassemble Verify integrity by reassembling chunks (requires --dry-run)
get:
-f, --format <format> Output format (json|text)
--storage <name> VM storage location to use
@@ -141,18 +152,21 @@ You can also download the `lume.pkg.tar.gz` archive from the [latest release](ht
## Prebuilt Images
Pre-built images are available in the registry [ghcr.io/trycua](https://github.com/orgs/trycua/packages).
**Important Note (v0.2.0+):** Images are being re-uploaded with sparse file system optimizations enabled, resulting in significantly lower actual disk usage. Older images (without the `-sparse` suffix) are now **deprecated**. The last version of `lume` fully supporting the non-sparse images was `v0.1.x`. Starting from `lume v0.2.0`, please use the images with the `-sparse` suffix.
These images come with an SSH server pre-configured and auto-login enabled.
For the security of your VM, change the default password `lume` immediately after your first login.
| Image | Tag | Description | Size |
| Image | Tag | Description | Logical Size |
|-------|------------|-------------|------|
| `macos-sequoia-vanilla` | `latest`, `15.2` | macOS Sequoia 15.2 image | 40GB |
| `macos-sequoia-xcode` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 50GB |
| `macos-sequoia-cua` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 80GB |
| `ubuntu-noble-vanilla` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB |
| `macos-sequoia-vanilla-sparse` | `latest`, `15.2` | macOS Sequoia 15.2 image | 40GB |
| `macos-sequoia-xcode-sparse` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 50GB |
| `macos-sequoia-cua-sparse` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 80GB |
| `ubuntu-noble-vanilla-sparse` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB |
For additional disk space, resize the VM disk after pulling the image using the `lume set <name> --disk-size <size>` command.
For additional disk space, resize the VM disk after pulling the image using the `lume set <name> --disk-size <size>` command. Note that the actual disk space used by sparse images will be much lower than the logical size listed.
## Local API Server

View File

@@ -193,6 +193,42 @@ curl --connect-timeout 6000 \
```
</details>
<details open>
<summary><strong>Push Image (Async)</strong> - POST /vms/push</summary>
```bash
# Push VM 'my-local-vm' to 'my-org/my-image:latest' and 'my-org/my-image:v1'
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
-H "Content-Type: application/json" \
-d '{
"name": "my-local-vm",
"imageName": "my-image",
"tags": ["latest", "v1"],
"organization": "my-org",
"registry": "ghcr.io",
"chunkSizeMb": 512,
"storage": null
}' \
http://localhost:3000/lume/vms/push
```
**Response (202 Accepted):**
```json
{
"message": "Push initiated in background",
"name": "my-local-vm",
"imageName": "my-image",
"tags": [
"latest",
"v1"
]
}
```
</details>
<details open>
<summary><strong>Clone VM</strong> - POST /vms/:name/clone</summary>

View File

@@ -1,205 +0,0 @@
#!/bin/bash
#
# Pull a lume VM image from GitHub Container Registry (ghcr.io) and extract
# its files (disk.img, config.json, nvram.bin) into a target folder.
# Large disk images are stored as multiple "part" layers (annotated with
# part.number/part.total in their media type) and are reassembled locally.

# Exit immediately if a command exits with a non-zero status
set -e

# Default parameters
organization=""
image_name=""
image_version=""
target_folder_path=""

# Parse the command line arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --organization)
            organization="$2"
            shift 2
            ;;
        --image-name)
            image_name="$2"
            shift 2
            ;;
        --image-version)
            image_version="$2"
            shift 2
            ;;
        --target-folder-path)
            target_folder_path="$2"
            shift 2
            ;;
        --help)
            echo "Usage: $0 [options]"
            echo "Options:"
            echo " --organization <organization> : GitHub organization (required)"
            echo " --image-name <name> : Name of the image to pull (required)"
            echo " --image-version <version> : Version of the image to pull (required)"
            echo " --target-folder-path <path> : Path where to extract the files (required)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Ensure required arguments
if [[ -z "$organization" || -z "$image_name" || -z "$image_version" || -z "$target_folder_path" ]]; then
    echo "Error: Missing required arguments. Use --help for usage."
    exit 1
fi

# Check and install required tools
for tool in "jq" "pv" "parallel"; do
    if ! command -v "$tool" &> /dev/null; then
        echo "$tool is not installed. Installing using Homebrew..."
        if ! command -v brew &> /dev/null; then
            echo "Homebrew is not installed. Please install Homebrew first: https://brew.sh/"
            exit 1
        fi
        brew install "$tool"
    fi
done

# Create target folder if it doesn't exist
mkdir -p "$target_folder_path"

# Create a temporary directory for processing files; removed on any exit
work_dir=$(mktemp -d)
echo "Working directory: $work_dir"
trap 'rm -rf "$work_dir"' EXIT

# Registry details
REGISTRY="ghcr.io"
REPOSITORY="$organization/$image_name"
TAG="$image_version"

# Get anonymous token (public images only need an unauthenticated pull token)
echo "Getting authentication token..."
curl -s "https://$REGISTRY/token?service=ghcr.io&scope=repository:$REPOSITORY:pull" -o "$work_dir/token.json"
TOKEN=$(jq -r ".token" "$work_dir/token.json")
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
    echo "Failed to obtain token"
    exit 1
fi
echo "Token obtained successfully"

# Fetch manifest
echo "Fetching manifest..."
MANIFEST_RESPONSE=$(curl -s \
    -H "Authorization: Bearer $TOKEN" \
    -H "Accept: application/vnd.oci.image.manifest.v1+json" \
    "https://$REGISTRY/v2/$REPOSITORY/manifests/$TAG")

echo "Processing manifest..."

# Stage all downloads inside the temp dir
cd "$work_dir"

# Download a single layer blob with a progress bar; invoked by GNU parallel.
download_layer() {
    local media_type="$1"
    local digest="$2"
    local output_file="$3"
    echo "Downloading $output_file..."
    curl -s -L \
        -H "Authorization: Bearer $TOKEN" \
        -H "Accept: $media_type" \
        "https://$REGISTRY/v2/$REPOSITORY/blobs/$digest" | \
        pv > "$output_file"
}
export -f download_layer
export TOKEN REGISTRY REPOSITORY

# Map each manifest layer to a local output file and queue it for download
echo "$MANIFEST_RESPONSE" | jq -c '.layers[]' | while read -r layer; do
    media_type=$(echo "$layer" | jq -r '.mediaType')
    digest=$(echo "$layer" | jq -r '.digest')

    # Skip empty layers
    if [[ "$media_type" == "application/vnd.oci.empty.v1+json" ]]; then
        continue
    fi

    # Extract part information if present
    if [[ $media_type =~ part\.number=([0-9]+)\;part\.total=([0-9]+) ]]; then
        part_num="${BASH_REMATCH[1]}"
        total_parts="${BASH_REMATCH[2]}"
        echo "Found part $part_num of $total_parts"
        output_file="disk.img.part.$part_num"
    else
        case "$media_type" in
            "application/vnd.oci.image.layer.v1.tar")
                output_file="disk.img"
                ;;
            "application/vnd.oci.image.config.v1+json")
                output_file="config.json"
                ;;
            "application/octet-stream")
                output_file="nvram.bin"
                ;;
            *)
                echo "Unknown media type: $media_type"
                continue
                ;;
        esac
    fi

    # Add to download queue (tab-separated: media type, digest, output file)
    echo "$media_type"$'\t'"$digest"$'\t'"$output_file" >> download_queue.txt
done

# Download all files in parallel
echo "Downloading files in parallel..."
parallel --colsep $'\t' -a download_queue.txt download_layer {1} {2} {3}

# Check if we have disk parts to reassemble
if ls disk.img.part.* 1> /dev/null 2>&1; then
    echo "Found disk parts, reassembling..."
    # Read the total part count from the manifest annotations
    total_parts=$(echo "$MANIFEST_RESPONSE" | jq -r '.layers[] | select(.mediaType | contains("part.total")) | .mediaType' | grep -o 'part\.total=[0-9]*' | cut -d= -f2 | head -n 1)
    echo "Total parts to reassemble: $total_parts"

    # Verify every part exists BEFORE streaming. Previously the missing-part
    # check ran inside a piped subshell, so its `exit 1` only killed the
    # subshell and a truncated disk image was reported as a success.
    for i in $(seq 1 "$total_parts"); do
        if [ ! -f "disk.img.part.$i" ]; then
            echo "Error: Missing part $i"
            exit 1
        fi
    done

    # Concatenate parts in order
    echo "Reassembling disk image..."
    for i in $(seq 1 "$total_parts"); do
        cat "disk.img.part.$i"
    done | pv > "$target_folder_path/disk.img"
    echo "Disk image reassembled successfully"
else
    # If no parts found, just copy disk.img if it exists
    if [ -f disk.img ]; then
        echo "Copying disk image..."
        pv disk.img > "$target_folder_path/disk.img"
    fi
fi

# Copy config.json if it exists
if [ -f config.json ]; then
    echo "Copying config.json..."
    cp config.json "$target_folder_path/"
fi

# Copy nvram.bin if it exists
if [ -f nvram.bin ]; then
    echo "Copying nvram.bin..."
    cp nvram.bin "$target_folder_path/"
fi

echo "Download complete: Files extracted to $target_folder_path"

View File

@@ -1,208 +0,0 @@
#!/bin/bash
#
# Publish a lume VM folder (disk.img, config.json, nvram.bin) to GitHub
# Container Registry (ghcr.io) via oras. Disk images over 500MB are split
# into chunks; each chunk layer is annotated with part.number/part.total so
# the pull script can reassemble them.

# Exit immediately if a command exits with a non-zero status
set -e

# Default parameters
organization=""
folder_path=""
image_name=""
image_versions=""
chunk_size="500M" # Default chunk size for splitting large files

# Parse the command line arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --organization)
            organization="$2"
            shift 2
            ;;
        --folder-path)
            folder_path="$2"
            shift 2
            ;;
        --image-name)
            image_name="$2"
            shift 2
            ;;
        --image-versions)
            image_versions="$2"
            shift 2
            ;;
        --chunk-size)
            chunk_size="$2"
            shift 2
            ;;
        --help)
            echo "Usage: $0 [options]"
            echo "Options:"
            echo " --organization <organization> : GitHub organization (required if not using token)"
            echo " --folder-path <path> : Path to the folder to upload (required)"
            echo " --image-name <name> : Name of the image to publish (required)"
            echo " --image-versions <versions> : Comma separated list of versions of the image to publish (required)"
            echo " --chunk-size <size> : Size of chunks for large files (e.g., 500M, default: 500M)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Ensure required arguments
if [[ -z "$organization" || -z "$folder_path" || -z "$image_name" || -z "$image_versions" ]]; then
    echo "Error: Missing required arguments. Use --help for usage."
    exit 1
fi

# Versions are documented as comma-separated, but the push loops below rely on
# shell word splitting (which splits on whitespace, not commas). Normalize
# commas to spaces so both "1.0,latest" and "1.0 latest" work.
image_versions="${image_versions//,/ }"

# Check if the GITHUB_TOKEN variable is set (needed for oras login)
if [[ -z "$GITHUB_TOKEN" ]]; then
    echo "Error: GITHUB_TOKEN is not set."
    exit 1
fi

# Ensure the folder exists
if [[ ! -d "$folder_path" ]]; then
    echo "Error: Folder $folder_path does not exist."
    exit 1
fi

# Check and install required tools
for tool in "oras" "split" "pv" "gzip"; do
    if ! command -v "$tool" &> /dev/null; then
        echo "$tool is not installed. Installing using Homebrew..."
        if ! command -v brew &> /dev/null; then
            echo "Homebrew is not installed. Please install Homebrew first: https://brew.sh/"
            exit 1
        fi
        brew install "$tool"
    fi
done

# Authenticate with GitHub Container Registry
echo "$GITHUB_TOKEN" | oras login ghcr.io -u "$organization" --password-stdin

# Create a temporary directory for processing files; removed on any exit
work_dir=$(mktemp -d)
echo "Working directory: $work_dir"
trap 'rm -rf "$work_dir"' EXIT

# Stage everything to upload inside the temp dir
mkdir -p "$work_dir/files"
cd "$work_dir/files"

# Copy config.json if it exists
if [ -f "$folder_path/config.json" ]; then
    echo "Copying config.json..."
    cp "$folder_path/config.json" config.json
fi

# Copy nvram.bin if it exists
nvram_bin="$folder_path/nvram.bin"
if [ -f "$nvram_bin" ]; then
    echo "Copying nvram.bin..."
    cp "$nvram_bin" nvram.bin
fi

# Process disk.img if it exists and needs splitting
disk_img="$folder_path/disk.img"
if [ -f "$disk_img" ]; then
    file_size=$(stat -f%z "$disk_img")
    if [ $file_size -gt 524288000 ]; then # 500MB in bytes
        echo "Splitting large file: disk.img"
        echo "Original disk.img size: $(du -h "$disk_img" | cut -f1)"
        # Copy and split the file with progress monitoring
        echo "Copying disk image..."
        pv "$disk_img" > disk.img
        echo "Splitting file..."
        split -b "$chunk_size" disk.img disk.img.part.
        rm disk.img
        # Size of the source image, used to verify that the parts add up.
        # Reuse the stat taken above instead of stat-ing the file a second time.
        original_size=$file_size
        echo "Original disk.img size: $(awk -v size=$original_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
        # Verify split parts total size
        total_size=0
        total_parts=$(ls disk.img.part.* | wc -l | tr -d ' ')
        part_num=0
        # Build the oras file list; each chunk layer carries part.number and
        # part.total annotations so the pull side can reassemble in order.
        files=()
        for part in disk.img.part.*; do
            part_size=$(stat -f%z "$part")
            total_size=$((total_size + part_size))
            part_num=$((part_num + 1))
            echo "Part $part: $(awk -v size=$part_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            files+=("$part:application/vnd.oci.image.layer.v1.tar;part.number=$part_num;part.total=$total_parts")
        done
        echo "Total size of parts: $(awk -v size=$total_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
        # Abort if the parts do not add up to the original image size
        if [ $total_size -ne $original_size ]; then
            echo "ERROR: Size mismatch!"
            echo "Original file size: $(awk -v size=$original_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            echo "Sum of parts size: $(awk -v size=$total_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            echo "Difference: $(awk -v orig=$original_size -v total=$total_size 'BEGIN {printf "%.2f GB", (orig-total)/1024/1024/1024}')"
            exit 1
        fi
        # Add remaining files
        if [ -f "config.json" ]; then
            files+=("config.json:application/vnd.oci.image.config.v1+json")
        fi
        if [ -f "nvram.bin" ]; then
            files+=("nvram.bin:application/octet-stream")
        fi
        # Push versions in parallel
        push_pids=()
        for version in $image_versions; do
            (
                echo "Pushing version $version..."
                oras push --disable-path-validation \
                    "ghcr.io/$organization/$image_name:$version" \
                    "${files[@]}"
                echo "Completed push for version $version"
            ) &
            push_pids+=($!)
        done
        # Wait for all pushes to complete; `wait <pid>` propagates a failed
        # push's exit status, which aborts the script via set -e.
        for pid in "${push_pids[@]}"; do
            wait "$pid"
        done
    else
        # Push disk.img directly if it's small enough
        echo "Copying disk image..."
        pv "$disk_img" > disk.img
        # Push all files together
        echo "Pushing all files..."
        files=("disk.img:application/vnd.oci.image.layer.v1.tar")
        if [ -f "config.json" ]; then
            files+=("config.json:application/vnd.oci.image.config.v1+json")
        fi
        if [ -f "nvram.bin" ]; then
            files+=("nvram.bin:application/octet-stream")
        fi
        for version in $image_versions; do
            # Push all files in one command
            oras push --disable-path-validation \
                "ghcr.io/$organization/$image_name:$version" \
                "${files[@]}"
        done
    fi
fi

for version in $image_versions; do
    echo "Upload complete: ghcr.io/$organization/$image_name:$version"
done

View File

@@ -0,0 +1,74 @@
import ArgumentParser
import Foundation
/// `lume push <name> <image:tag>` — upload a local VM to a container registry.
///
/// The primary tag comes from the `<image:tag>` argument; `--additional-tags`
/// pushes the same image under extra tags in one operation.
struct Push: AsyncParsableCommand {
    static let configuration = CommandConfiguration(
        abstract: "Push a macOS VM to GitHub Container Registry"
    )

    @Argument(help: "Name of the VM to push")
    var name: String

    @Argument(help: "Image tag to push (format: name:tag)")
    var image: String

    @Option(parsing: .upToNextOption, help: "Additional tags to push the same image to")
    var additionalTags: [String] = []

    @Option(help: "GitHub Container Registry to push to. Defaults to ghcr.io")
    var registry: String = "ghcr.io"

    @Option(help: "Organization to push to. Defaults to trycua")
    var organization: String = "trycua"

    @Option(name: .customLong("storage"), help: "VM storage location to use")
    var storage: String?

    @Option(help: "Chunk size for large files in MB. Defaults to 512.")
    var chunkSizeMb: Int = 512

    @Flag(name: .long, help: "Enable verbose logging")
    var verbose: Bool = false

    @Flag(name: .long, help: "Prepare files without uploading to registry")
    var dryRun: Bool = false

    // Default must be false: a plain long `@Flag` can only set its value to
    // true, so a `true` default made `--reassemble` impossible to disable and
    // contradicted both the help text ("In dry-run mode, also ...") and
    // `LumeController.pushImage`'s `reassemble: false` default.
    @Flag(name: .long, help: "In dry-run mode, also reassemble chunks to verify integrity")
    var reassemble: Bool = false

    init() {}

    @MainActor
    func run() async throws {
        let controller = LumeController()

        // Parse primary image name and tag (expected "name:tag")
        let components = image.split(separator: ":")
        guard components.count == 2, let primaryTag = components.last else {
            throw ValidationError("Invalid primary image format. Expected format: name:tag")
        }
        let imageName = String(components.first!)

        // Combine primary and additional tags, ensuring uniqueness
        var allTags: Swift.Set<String> = []
        allTags.insert(String(primaryTag))
        allTags.formUnion(additionalTags)

        guard !allTags.isEmpty else {
            throw ValidationError("At least one tag must be provided.")
        }

        try await controller.pushImage(
            name: name,
            imageName: imageName, // Pass base image name
            tags: Array(allTags), // Pass array of all unique tags
            registry: registry,
            organization: organization,
            storage: storage,
            chunkSizeMb: chunkSizeMb,
            verbose: verbose,
            dryRun: dryRun,
            reassemble: reassemble
        )
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -5,6 +5,15 @@ enum HomeError: Error, LocalizedError {
case directoryAccessDenied(path: String)
case invalidHomeDirectory
case directoryAlreadyExists(path: String)
case homeNotFound
case defaultStorageNotDefined
case storageLocationNotFound(String)
case storageLocationNotADirectory(String)
case storageLocationNotWritable(String)
case invalidStorageLocation(String)
case cannotCreateDirectory(String)
case cannotGetVMsDirectory
case vmDirectoryNotFound(String)
var errorDescription: String? {
switch self {
@@ -16,6 +25,24 @@ enum HomeError: Error, LocalizedError {
return "Invalid home directory configuration"
case .directoryAlreadyExists(let path):
return "Directory already exists at path: \(path)"
case .homeNotFound:
return "Home directory not found."
case .defaultStorageNotDefined:
return "Default storage location is not defined."
case .storageLocationNotFound(let path):
return "Storage location not found: \(path)"
case .storageLocationNotADirectory(let path):
return "Storage location is not a directory: \(path)"
case .storageLocationNotWritable(let path):
return "Storage location is not writable: \(path)"
case .invalidStorageLocation(let path):
return "Invalid storage location specified: \(path)"
case .cannotCreateDirectory(let path):
return "Cannot create directory: \(path)"
case .cannotGetVMsDirectory:
return "Cannot determine the VMs directory."
case .vmDirectoryNotFound(let path):
return "VM directory not found: \(path)"
}
}
}
@@ -28,23 +55,35 @@ enum PullError: Error, LocalizedError {
case missingPart(Int)
case decompressionFailed(String)
case reassemblyFailed(String)
case fileCreationFailed(String)
case reassemblySetupFailed(path: String, underlyingError: Error)
case missingUncompressedSizeAnnotation
case invalidMediaType
var errorDescription: String? {
switch self {
case .invalidImageFormat:
return "Invalid image format. Expected format: name:tag"
case .tokenFetchFailed:
return "Failed to obtain authentication token"
return "Failed to fetch authentication token from registry."
case .manifestFetchFailed:
return "Failed to fetch manifest"
return "Failed to fetch image manifest from registry."
case .layerDownloadFailed(let digest):
return "Failed to download layer: \(digest)"
case .missingPart(let number):
return "Missing disk image part \(number)"
case .decompressionFailed(let filename):
return "Failed to decompress file: \(filename)"
case .missingPart(let partNum):
return "Missing required part number \(partNum) for reassembly."
case .decompressionFailed(let file):
return "Failed to decompress file: \(file)"
case .reassemblyFailed(let reason):
return "Disk image reassembly failed: \(reason)."
case .fileCreationFailed(let path):
return "Failed to create the necessary file at path: \(path)"
case .reassemblySetupFailed(let path, let underlyingError):
return "Failed to set up for reassembly at path: \(path). Underlying error: \(underlyingError.localizedDescription)"
case .missingUncompressedSizeAnnotation:
return "Could not find the required uncompressed disk size annotation in the image config.json."
case .invalidMediaType:
return "Invalid media type"
}
}
}
@@ -165,4 +204,24 @@ enum VMError: Error, LocalizedError {
return "Invalid display resolution: \(resolution)"
}
}
}
/// Errors surfaced by restic-based snapshot/restore operations.
///
/// Conforms to `LocalizedError` for consistency with the other error enums in
/// this file, so `error.localizedDescription` produces a meaningful message
/// instead of the generic default.
enum ResticError: Error, LocalizedError {
    case snapshotFailed(String)
    case restoreFailed(String)
    case genericError(String)

    var errorDescription: String? {
        switch self {
        case .snapshotFailed(let reason):
            return "Restic snapshot failed: \(reason)"
        case .restoreFailed(let reason):
            return "Restic restore failed: \(reason)"
        case .genericError(let reason):
            return "Restic error: \(reason)"
        }
    }
}
/// Errors from shelling out to VMware's `vmrun` utility.
enum VmrunError: Error, LocalizedError {
    /// `vmrun` was not found on the system.
    case commandNotFound
    /// A `vmrun` invocation exited unsuccessfully; carries the command name
    /// and whatever output (if any) the process produced.
    case operationFailed(command: String, output: String?)

    var errorDescription: String? {
        switch self {
        case .commandNotFound:
            return "vmrun command not found. Ensure VMware Fusion is installed and in the system PATH."
        case let .operationFailed(command, output):
            return "vmrun command '\(command)' failed. Output: \(output ?? "No output")"
        }
    }
}

View File

@@ -452,6 +452,77 @@ final class LumeController {
}
}
@MainActor
/// Pushes a local VM's files to a container registry under one or more tags.
///
/// Validates the inputs, resolves the VM's storage location, and delegates
/// the upload to `ImageContainerRegistry.push`. Any failure is logged and
/// rethrown to the caller.
public func pushImage(
    name: String,
    imageName: String,
    tags: [String],
    registry: String,
    organization: String,
    storage: String? = nil,
    chunkSizeMb: Int = 512,
    verbose: Bool = false,
    dryRun: Bool = false,
    reassemble: Bool = false
) async throws {
    do {
        Logger.info(
            "Pushing VM to registry",
            metadata: [
                "name": name,
                "imageName": imageName,
                "tags": tags.joined(separator: ", "),
                "registry": registry,
                "organization": organization,
                "location": storage ?? "default",
                "chunk_size": "\(chunkSizeMb)MB",
                "dry_run": "\(dryRun)",
                "reassemble": "\(reassemble)"
            ])

        try validatePushParameters(
            name: name,
            imageName: imageName,
            tags: tags,
            registry: registry,
            organization: organization
        )

        // Resolve which storage location actually holds the VM, then its directory
        let resolvedLocation = try self.validateVMExists(name, storage: storage)
        let vmDirectory = try home.getVMDirectory(name, storage: resolvedLocation)

        // Hand the actual upload off to the registry client
        let registryClient = ImageContainerRegistry(
            registry: registry, organization: organization)
        try await registryClient.push(
            vmDirPath: vmDirectory.dir.path,
            imageName: imageName,
            tags: tags,
            chunkSizeMb: chunkSizeMb,
            verbose: verbose,
            dryRun: dryRun,
            reassemble: reassemble
        )

        Logger.info(
            "VM pushed successfully",
            metadata: [
                "name": name,
                "imageName": imageName,
                "tags": tags.joined(separator: ", "),
                "registry": registry,
                "organization": organization,
            ])
    } catch {
        Logger.error("Failed to push VM", metadata: ["error": error.localizedDescription])
        throw error
    }
}
@MainActor
public func pruneImages() async throws {
Logger.info("Pruning cached images")
@@ -755,4 +826,31 @@ final class LumeController {
break
}
}
/// Validates the user-supplied push parameters, throwing a `ValidationError`
/// describing the first problem found.
private func validatePushParameters(
    name: String,
    imageName: String,
    tags: [String],
    registry: String,
    organization: String
) throws {
    guard !name.isEmpty else {
        throw ValidationError("VM name cannot be empty")
    }
    guard !imageName.isEmpty else {
        throw ValidationError("Image name cannot be empty")
    }
    guard !tags.isEmpty else {
        throw ValidationError("At least one tag must be provided.")
    }
    guard !registry.isEmpty else {
        throw ValidationError("Registry cannot be empty")
    }
    guard !organization.isEmpty else {
        throw ValidationError("Organization cannot be empty")
    }
    // NOTE: The VM's existence is verified by the caller (`pushImage`) via
    // `validateVMExists(name, storage:)`, which honors the user-supplied
    // storage location. The storage-unaware `validateVMExists(name)` call
    // previously made here was redundant and could spuriously fail for VMs
    // kept in a non-default storage location.
}
}

View File

@@ -288,6 +288,54 @@ extension Server {
}
}
/// POST /vms/push — start pushing a local VM image to a container registry.
///
/// Decodes a `PushRequest` from the body, kicks the push off in a detached
/// background task, and returns `202 Accepted` immediately with a summary of
/// what was initiated. The push's eventual success or failure is reported
/// only via server logs, not to the HTTP client.
///
/// - Parameter body: Raw request body; must decode as `PushRequest`.
/// - Returns: `400 Bad Request` for an undecodable body, else `202 Accepted`.
func handlePush(_ body: Data?) async throws -> HTTPResponse {
    guard let body = body,
        let request = try? JSONDecoder().decode(PushRequest.self, from: body)
    else {
        return HTTPResponse(
            statusCode: .badRequest,
            headers: ["Content-Type": "application/json"],
            body: try JSONEncoder().encode(APIError(message: "Invalid request body"))
        )
    }

    // Trigger push asynchronously, return Accepted immediately.
    // NOTE(review): the task is detached, so a server shutdown will not wait
    // for an in-flight push — confirm that is acceptable.
    Task.detached { @MainActor @Sendable in
        do {
            let vmController = LumeController()
            try await vmController.pushImage(
                name: request.name,
                imageName: request.imageName,
                tags: request.tags,
                registry: request.registry,
                organization: request.organization,
                storage: request.storage,
                chunkSizeMb: request.chunkSizeMb,
                verbose: false, // not part of PushRequest; server logs cover verbosity
                dryRun: false, // not part of PushRequest; API pushes always upload
                reassemble: false // not part of PushRequest; no verification pass
            )
            Logger.info("Background push completed successfully for image: \(request.imageName):\(request.tags.joined(separator: ","))")
        } catch {
            Logger.error(
                "Background push failed for image: \(request.imageName):\(request.tags.joined(separator: ","))",
                metadata: ["error": error.localizedDescription]
            )
        }
    }

    // 202 response echoes back what was queued (heterogeneous values, hence AnyEncodable)
    return HTTPResponse(
        statusCode: .accepted,
        headers: ["Content-Type": "application/json"],
        body: try JSONEncoder().encode([
            "message": AnyEncodable("Push initiated in background"),
            "name": AnyEncodable(request.name),
            "imageName": AnyEncodable(request.imageName),
            "tags": AnyEncodable(request.tags),
        ])
    )
}
func handleGetImages(_ request: HTTPRequest) async throws -> HTTPResponse {
let pathAndQuery = request.path.split(separator: "?", maxSplits: 1)
let queryParams =

View File

@@ -102,3 +102,31 @@ struct CloneRequest: Codable {
let sourceLocation: String?
let destLocation: String?
}
/// Body of `POST /vms/push`.
///
/// `registry`, `organization`, and `chunkSizeMb` may be omitted from the JSON
/// payload and fall back to "ghcr.io" / "trycua" / 512 respectively.
struct PushRequest: Codable {
    let name: String  // Name of the local VM
    let imageName: String  // Base name for the image in the registry
    let tags: [String]  // List of tags to push
    var registry: String  // Registry URL
    var organization: String  // Organization/user in the registry
    let storage: String?  // Optional VM storage location
    var chunkSizeMb: Int  // Chunk size

    // verbose/dryRun/reassemble are intentionally not part of the API payload;
    // the server chooses those when invoking the push.
    enum CodingKeys: String, CodingKey {
        case name, imageName, tags, registry, organization, storage, chunkSizeMb
    }

    // Custom decoding so the optional fields get their defaults when absent.
    init(from decoder: Decoder) throws {
        let values = try decoder.container(keyedBy: CodingKeys.self)
        name = try values.decode(String.self, forKey: .name)
        imageName = try values.decode(String.self, forKey: .imageName)
        tags = try values.decode([String].self, forKey: .tags)
        storage = try values.decodeIfPresent(String.self, forKey: .storage)
        registry = try values.decodeIfPresent(String.self, forKey: .registry) ?? "ghcr.io"
        organization = try values.decodeIfPresent(String.self, forKey: .organization) ?? "trycua"
        chunkSizeMb = try values.decodeIfPresent(Int.self, forKey: .chunkSizeMb) ?? 512
    }
}

View File

@@ -4,6 +4,19 @@ struct APIError: Codable {
let message: String
}
/// Type-erasing wrapper so values of different `Encodable` types can live in
/// one dictionary (e.g. `[String: AnyEncodable]` response bodies).
struct AnyEncodable: Encodable {
    // Captured encoding action of the wrapped value.
    private let encodeValue: (Encoder) throws -> Void

    init(_ value: Encodable) {
        self.encodeValue = { encoder in try value.encode(to: encoder) }
    }

    func encode(to encoder: Encoder) throws {
        try encodeValue(encoder)
    }
}
extension HTTPResponse {
static func json<T: Encodable>(_ value: T) throws -> HTTPResponse {
let data = try JSONEncoder().encode(value)

View File

@@ -261,6 +261,12 @@ final class Server {
}
return try await self.handleSetDefaultLocation(name)
}),
Route(
method: "POST", path: "/vms/push",
handler: { [weak self] request in
guard let self else { throw HTTPError.internalError }
return try await self.handlePush(request.body)
}),
]
}

View File

@@ -5,6 +5,7 @@ enum CommandRegistry {
[
Create.self,
Pull.self,
Push.self,
Images.self,
Clone.self,
Get.self,