Mirror of https://github.com/trycua/computer.git (synced 2026-01-04 12:30:08 -06:00)
Add Cua Preview
117
scripts/build.sh
Executable file
@@ -0,0 +1,117 @@
#!/bin/bash

# Exit on error
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Function to print step information
print_step() {
    echo -e "${BLUE}==> $1${NC}"
}

# Function to print success message
print_success() {
    echo -e "${GREEN}==> Success: $1${NC}"
}

# Function to print error message
print_error() {
    echo -e "${RED}==> Error: $1${NC}" >&2
}

# Get the script's directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$( cd "${SCRIPT_DIR}/.." && pwd )"

# Change to project root
cd "$PROJECT_ROOT"

# Load environment variables from .env.local
if [ -f .env.local ]; then
    print_step "Loading environment variables from .env.local..."
    set -a
    source .env.local
    set +a
    print_success "Environment variables loaded"
else
    print_error ".env.local file not found"
    exit 1
fi

# Clean up existing environments and cache
print_step "Cleaning up existing environments..."
find . -type d -name "__pycache__" -exec rm -rf {} +
find . -type d -name ".pytest_cache" -exec rm -rf {} +
find . -type d -name "dist" -exec rm -rf {} +
find . -type d -name ".venv" -exec rm -rf {} +
find . -type d -name "*.egg-info" -exec rm -rf {} +
print_success "Environment cleanup complete"

# Create and activate virtual environment
print_step "Creating virtual environment..."
python -m venv .venv
source .venv/bin/activate

# Upgrade pip and install build tools
print_step "Upgrading pip and installing build tools..."
python -m pip install --upgrade pip setuptools wheel

# Function to install a package and its dependencies
install_package() {
    local package_dir=$1
    local package_name=$2
    local extras=$3
    print_step "Installing ${package_name}..."
    cd "$package_dir"

    if [ -f "pyproject.toml" ]; then
        if [ -n "$extras" ]; then
            pip install -e ".[${extras}]"
        else
            pip install -e .
        fi
    else
        print_error "No pyproject.toml found in ${package_dir}"
        return 1
    fi

    cd "$PROJECT_ROOT"
}

# Install packages in order of dependency
print_step "Installing packages in development mode..."

# Install core first (base package with telemetry support)
install_package "libs/core" "core"

# Install pylume (base dependency)
install_package "libs/pylume" "pylume"

# Install computer (depends on pylume)
install_package "libs/computer" "computer"

# Install som (omniparser)
install_package "libs/som" "som"

# Install agent with all its dependencies and extras
install_package "libs/agent" "agent" "all"

# Install computer-server
install_package "libs/computer-server" "computer-server"

# Install development tools from root project
print_step "Installing development dependencies..."
pip install -e ".[dev,test,docs]"

# Create a .env file for VS Code to use the virtual environment
print_step "Creating .env file for VS Code..."
echo "PYTHONPATH=${PROJECT_ROOT}/libs/core:${PROJECT_ROOT}/libs/computer:${PROJECT_ROOT}/libs/agent:${PROJECT_ROOT}/libs/som:${PROJECT_ROOT}/libs/pylume:${PROJECT_ROOT}/libs/computer-server" > .env

print_success "All packages installed successfully!"
print_step "Your virtual environment is ready. To activate it:"
echo " source .venv/bin/activate"
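Note that build.sh aborts immediately if .env.local is absent from the project root, and the expected contents of that file are not shown in this commit. A minimal placeholder such as the following is enough to let the script proceed; the variable names are illustrative only, not taken from the repo:

    # .env.local -- loaded via `set -a; source .env.local; set +a`,
    # so every assignment below is exported into the build environment.
    # Hypothetical example entries; substitute whatever your setup needs.
    OPENAI_API_KEY=placeholder
    ANTHROPIC_API_KEY=placeholder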
@@ -1,4 +0,0 @@
#!/bin/sh

swift build --product lume
codesign --force --entitlement resources/lume.entitlements --sign - .build/debug/lume
@@ -1,99 +0,0 @@
#!/bin/bash

# Check required environment variables
required_vars=(
    "CERT_APPLICATION_NAME"
    "CERT_INSTALLER_NAME"
    "APPLE_ID"
    "TEAM_ID"
    "APP_SPECIFIC_PASSWORD"
)

for var in "${required_vars[@]}"; do
    if [ -z "${!var}" ]; then
        echo "Error: $var is not set"
        exit 1
    fi
done

# Move to the project root directory
pushd ../../

# Build the release version
swift build -c release --product lume

# Sign the binary with hardened runtime entitlements
codesign --force --options runtime \
    --entitlement ./resources/lume.entitlements \
    --sign "$CERT_APPLICATION_NAME" \
    .build/release/lume

# Create a temporary directory for packaging
TEMP_ROOT=$(mktemp -d)
mkdir -p "$TEMP_ROOT/usr/local/bin"
cp -f .build/release/lume "$TEMP_ROOT/usr/local/bin/"

# Build the installer package
pkgbuild --root "$TEMP_ROOT" \
    --identifier "com.trycua.lume" \
    --version "1.0" \
    --install-location "/" \
    --sign "$CERT_INSTALLER_NAME" \
    ./.release/lume.pkg

# Submit for notarization using stored credentials
xcrun notarytool submit ./.release/lume.pkg \
    --apple-id "${APPLE_ID}" \
    --team-id "${TEAM_ID}" \
    --password "${APP_SPECIFIC_PASSWORD}" \
    --wait

# Staple the notarization ticket
xcrun stapler staple ./.release/lume.pkg

# Create temporary directory for package extraction
EXTRACT_ROOT=$(mktemp -d)
PKG_PATH="$(pwd)/.release/lume.pkg"

# Extract the pkg using xar
cd "$EXTRACT_ROOT"
xar -xf "$PKG_PATH"

# Verify Payload exists before proceeding
if [ ! -f "Payload" ]; then
    echo "Error: Payload file not found after xar extraction"
    exit 1
fi

# Create a directory for the extracted contents
mkdir -p extracted
cd extracted

# Extract the Payload
cat ../Payload | gunzip -dc | cpio -i

# Verify the binary exists
if [ ! -f "usr/local/bin/lume" ]; then
    echo "Error: lume binary not found in expected location"
    exit 1
fi

# Copy extracted lume to ./.release/lume
cp -f usr/local/bin/lume "$(dirname "$PKG_PATH")/lume"

# Create symbolic link in /usr/local/bin
cd "$(dirname "$PKG_PATH")"
sudo ln -sf "$(pwd)/lume" /usr/local/bin/lume

# Create zip archive of the package
tar -czvf lume.tar.gz lume
tar -czvf lume.pkg.tar.gz lume.pkg

# Create sha256 checksum for the lume tarball
shasum -a 256 lume.tar.gz

popd

# Clean up
rm -rf "$TEMP_ROOT"
rm -rf "$EXTRACT_ROOT"
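This removed script expects its signing and notarization credentials in the environment before it runs. A sketch of how it would have been invoked, with placeholder values only (the script's filename is not shown in this diff):

    export CERT_APPLICATION_NAME="Developer ID Application: Example Corp (TEAMID1234)"
    export CERT_INSTALLER_NAME="Developer ID Installer: Example Corp (TEAMID1234)"
    export APPLE_ID="dev@example.com"
    export TEAM_ID="TEAMID1234"
    export APP_SPECIFIC_PASSWORD="abcd-efgh-ijkl-mnop"
    ./the-notarize-script.sh   # hypothetical name; not shown in this diff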
@@ -1,15 +0,0 @@
#!/bin/sh

pushd ../../

swift build -c release --product lume
codesign --force --entitlement ./resources/lume.entitlements --sign - .build/release/lume

mkdir -p ./.release
cp -f .build/release/lume ./.release/lume

# Create symbolic link in /usr/local/bin
sudo mkdir -p /usr/local/bin
sudo ln -sf "$(pwd)/.release/lume" /usr/local/bin/lume

popd
85
scripts/cleanup.sh
Executable file
@@ -0,0 +1,85 @@
#!/bin/bash

# Exit on error
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Function to print step information
print_step() {
    echo -e "${BLUE}==> $1${NC}"
}

# Function to print success message
print_success() {
    echo -e "${GREEN}==> Success: $1${NC}"
}

# Function to print error message
print_error() {
    echo -e "${RED}==> Error: $1${NC}" >&2
}

# Get the script's directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$SCRIPT_DIR/.."

# Change to project root
cd "$PROJECT_ROOT"

print_step "Starting cleanup of all caches and virtual environments..."

# Remove all virtual environments
print_step "Removing virtual environments..."
find . -type d -name ".venv" -exec rm -rf {} +
print_success "Virtual environments removed"

# Remove all Python cache files and directories
print_step "Removing Python cache files and directories..."
find . -type d -name "__pycache__" -exec rm -rf {} +
find . -type d -name ".pytest_cache" -exec rm -rf {} +
find . -type d -name ".mypy_cache" -exec rm -rf {} +
find . -type d -name ".ruff_cache" -exec rm -rf {} +
find . -name "*.pyc" -delete
find . -name "*.pyo" -delete
find . -name "*.pyd" -delete
print_success "Python cache files removed"

# Remove all build artifacts
print_step "Removing build artifacts..."
find . -type d -name "build" -exec rm -rf {} +
find . -type d -name "dist" -exec rm -rf {} +
find . -type d -name "*.egg-info" -exec rm -rf {} +
find . -type d -name "*.egg" -exec rm -rf {} +
print_success "Build artifacts removed"

# Remove PDM-related files and directories
print_step "Removing PDM-related files and directories..."
find . -name "pdm.lock" -delete
find . -type d -name ".pdm-build" -exec rm -rf {} +
find . -name ".pdm-python" -delete # .pdm-python is a file, not a directory
print_success "PDM-related files removed"

# Remove .env file
print_step "Removing .env file..."
rm -f .env
print_success ".env file removed"

# Remove typings directory
print_step "Removing typings directory..."
rm -rf .vscode/typings
print_success "Typings directory removed"

# Clean up any temporary files
print_step "Removing temporary files..."
find . -name "*.tmp" -delete
find . -name "*.bak" -delete
find . -name "*.swp" -delete
print_success "Temporary files removed"

print_success "Cleanup complete! All caches and virtual environments have been removed."
print_step "To rebuild the project, run: bash scripts/build.sh"
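As the script's final message suggests, cleanup.sh is meant to pair with build.sh for a full reset of the development environment:

    bash scripts/cleanup.sh && bash scripts/build.sh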
@@ -1,205 +0,0 @@
#!/bin/bash

# Exit immediately if a command exits with a non-zero status
set -e

# Default parameters
organization=""
image_name=""
image_version=""
target_folder_path=""

# Parse the command line arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --organization)
            organization="$2"
            shift 2
            ;;
        --image-name)
            image_name="$2"
            shift 2
            ;;
        --image-version)
            image_version="$2"
            shift 2
            ;;
        --target-folder-path)
            target_folder_path="$2"
            shift 2
            ;;
        --help)
            echo "Usage: $0 [options]"
            echo "Options:"
            echo "  --organization <organization> : GitHub organization (required)"
            echo "  --image-name <name> : Name of the image to pull (required)"
            echo "  --image-version <version> : Version of the image to pull (required)"
            echo "  --target-folder-path <path> : Path where to extract the files (required)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Ensure required arguments
if [[ -z "$organization" || -z "$image_name" || -z "$image_version" || -z "$target_folder_path" ]]; then
    echo "Error: Missing required arguments. Use --help for usage."
    exit 1
fi

# Check and install required tools
for tool in "jq" "pv" "parallel"; do
    if ! command -v "$tool" &> /dev/null; then
        echo "$tool is not installed. Installing using Homebrew..."
        if ! command -v brew &> /dev/null; then
            echo "Homebrew is not installed. Please install Homebrew first: https://brew.sh/"
            exit 1
        fi
        brew install "$tool"
    fi
done

# Create target folder if it doesn't exist
mkdir -p "$target_folder_path"

# Create a temporary directory for processing files
work_dir=$(mktemp -d)
echo "Working directory: $work_dir"
trap 'rm -rf "$work_dir"' EXIT

# Registry details
REGISTRY="ghcr.io"
REPOSITORY="$organization/$image_name"
TAG="$image_version"

# Get anonymous token
echo "Getting authentication token..."
curl -s "https://$REGISTRY/token?service=ghcr.io&scope=repository:$REPOSITORY:pull" -o "$work_dir/token.json"
TOKEN=$(cat "$work_dir/token.json" | jq -r ".token")

if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
    echo "Failed to obtain token"
    exit 1
fi

echo "Token obtained successfully"

# Fetch manifest
echo "Fetching manifest..."
MANIFEST_RESPONSE=$(curl -s \
    -H "Authorization: Bearer $TOKEN" \
    -H "Accept: application/vnd.oci.image.manifest.v1+json" \
    "https://$REGISTRY/v2/$REPOSITORY/manifests/$TAG")

echo "Processing manifest..."

# Create a directory for all files
cd "$work_dir"

# Create a download function for parallel execution
download_layer() {
    local media_type="$1"
    local digest="$2"
    local output_file="$3"

    echo "Downloading $output_file..."
    curl -s -L \
        -H "Authorization: Bearer $TOKEN" \
        -H "Accept: $media_type" \
        "https://$REGISTRY/v2/$REPOSITORY/blobs/$digest" | \
        pv > "$output_file"
}
export -f download_layer
export TOKEN REGISTRY REPOSITORY

# Process layers and create download jobs
echo "$MANIFEST_RESPONSE" | jq -c '.layers[]' | while read -r layer; do
    media_type=$(echo "$layer" | jq -r '.mediaType')
    digest=$(echo "$layer" | jq -r '.digest')

    # Skip empty layers
    if [[ "$media_type" == "application/vnd.oci.empty.v1+json" ]]; then
        continue
    fi

    # Extract part information if present
    if [[ $media_type =~ part\.number=([0-9]+)\;part\.total=([0-9]+) ]]; then
        part_num="${BASH_REMATCH[1]}"
        total_parts="${BASH_REMATCH[2]}"
        echo "Found part $part_num of $total_parts"
        output_file="disk.img.part.$part_num"
    else
        case "$media_type" in
            "application/vnd.oci.image.layer.v1.tar")
                output_file="disk.img"
                ;;
            "application/vnd.oci.image.config.v1+json")
                output_file="config.json"
                ;;
            "application/octet-stream")
                output_file="nvram.bin"
                ;;
            *)
                echo "Unknown media type: $media_type"
                continue
                ;;
        esac
    fi

    # Add to download queue
    echo "$media_type"$'\t'"$digest"$'\t'"$output_file" >> download_queue.txt
done

# Download all files in parallel
echo "Downloading files in parallel..."
parallel --colsep $'\t' -a download_queue.txt download_layer {1} {2} {3}

# Check if we have disk parts to reassemble
if ls disk.img.part.* 1> /dev/null 2>&1; then
    echo "Found disk parts, reassembling..."

    # Get total parts from the first part's filename
    first_part=$(ls disk.img.part.* | head -n 1)
    total_parts=$(echo "$MANIFEST_RESPONSE" | jq -r '.layers[] | select(.mediaType | contains("part.total")) | .mediaType' | grep -o 'part\.total=[0-9]*' | cut -d= -f2 | head -n 1)

    echo "Total parts to reassemble: $total_parts"

    # Concatenate parts in order
    echo "Reassembling disk image..."
    {
        for i in $(seq 1 "$total_parts"); do
            part_file="disk.img.part.$i"
            if [ -f "$part_file" ]; then
                cat "$part_file"
            else
                echo "Error: Missing part $i"
                exit 1
            fi
        done
    } | pv > "$target_folder_path/disk.img"

    echo "Disk image reassembled successfully"
else
    # If no parts found, just copy disk.img if it exists
    if [ -f disk.img ]; then
        echo "Copying disk image..."
        pv disk.img > "$target_folder_path/disk.img"
    fi
fi

# Copy config.json if it exists
if [ -f config.json ]; then
    echo "Copying config.json..."
    cp config.json "$target_folder_path/"
fi

# Copy nvram.bin if it exists
if [ -f nvram.bin ]; then
    echo "Copying nvram.bin..."
    cp nvram.bin "$target_folder_path/"
fi

echo "Download complete: Files extracted to $target_folder_path"
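Going by the --help text in this removed pull script, a typical invocation would have looked like the following; the organization, image name, version, and target path are placeholders, and the script's filename is not shown in this diff:

    ./the-pull-script.sh \
        --organization trycua \
        --image-name my-vm-image \
        --image-version 1.0 \
        --target-folder-path ~/vms/my-vm-image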
@@ -1,208 +0,0 @@
#!/bin/bash

# Exit immediately if a command exits with a non-zero status
set -e

# Default parameters
organization=""
folder_path=""
image_name=""
image_versions=""
chunk_size="500M" # Default chunk size for splitting large files

# Parse the command line arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --organization)
            organization="$2"
            shift 2
            ;;
        --folder-path)
            folder_path="$2"
            shift 2
            ;;
        --image-name)
            image_name="$2"
            shift 2
            ;;
        --image-versions)
            image_versions="$2"
            shift 2
            ;;
        --chunk-size)
            chunk_size="$2"
            shift 2
            ;;
        --help)
            echo "Usage: $0 [options]"
            echo "Options:"
            echo "  --organization <organization> : GitHub organization (required if not using token)"
            echo "  --folder-path <path> : Path to the folder to upload (required)"
            echo "  --image-name <name> : Name of the image to publish (required)"
            echo "  --image-versions <versions> : Comma separated list of versions of the image to publish (required)"
            echo "  --chunk-size <size> : Size of chunks for large files (e.g., 500M, default: 500M)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Ensure required arguments
if [[ -z "$organization" || -z "$folder_path" || -z "$image_name" || -z "$image_versions" ]]; then
    echo "Error: Missing required arguments. Use --help for usage."
    exit 1
fi

# Check if the GITHUB_TOKEN variable is set
if [[ -z "$GITHUB_TOKEN" ]]; then
    echo "Error: GITHUB_TOKEN is not set."
    exit 1
fi

# Ensure the folder exists
if [[ ! -d "$folder_path" ]]; then
    echo "Error: Folder $folder_path does not exist."
    exit 1
fi

# Check and install required tools
for tool in "oras" "split" "pv" "gzip"; do
    if ! command -v "$tool" &> /dev/null; then
        echo "$tool is not installed. Installing using Homebrew..."
        if ! command -v brew &> /dev/null; then
            echo "Homebrew is not installed. Please install Homebrew first: https://brew.sh/"
            exit 1
        fi
        brew install "$tool"
    fi
done

# Authenticate with GitHub Container Registry
echo "$GITHUB_TOKEN" | oras login ghcr.io -u "$organization" --password-stdin

# Create a temporary directory for processing files
work_dir=$(mktemp -d)
echo "Working directory: $work_dir"
trap 'rm -rf "$work_dir"' EXIT

# Create a directory for all files
mkdir -p "$work_dir/files"
cd "$work_dir/files"

# Copy config.json if it exists
if [ -f "$folder_path/config.json" ]; then
    echo "Copying config.json..."
    cp "$folder_path/config.json" config.json
fi

# Copy nvram.bin if it exists
nvram_bin="$folder_path/nvram.bin"
if [ -f "$nvram_bin" ]; then
    echo "Copying nvram.bin..."
    cp "$nvram_bin" nvram.bin
fi

# Process disk.img if it exists and needs splitting
disk_img="$folder_path/disk.img"
if [ -f "$disk_img" ]; then
    file_size=$(stat -f%z "$disk_img")
    if [ $file_size -gt 524288000 ]; then # 500MB in bytes
        echo "Splitting large file: disk.img"
        echo "Original disk.img size: $(du -h "$disk_img" | cut -f1)"

        # Copy and split the file with progress monitoring
        echo "Copying disk image..."
        pv "$disk_img" > disk.img

        echo "Splitting file..."
        split -b "$chunk_size" disk.img disk.img.part.
        rm disk.img

        # Get original file size for verification
        original_size=$(stat -f%z "$disk_img")
        echo "Original disk.img size: $(awk -v size=$original_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"

        # Verify split parts total size
        total_size=0
        total_parts=$(ls disk.img.part.* | wc -l | tr -d ' ')
        part_num=0

        # Create array for files and their annotations
        files=()
        for part in disk.img.part.*; do
            part_size=$(stat -f%z "$part")
            total_size=$((total_size + part_size))
            part_num=$((part_num + 1))
            echo "Part $part: $(awk -v size=$part_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            files+=("$part:application/vnd.oci.image.layer.v1.tar;part.number=$part_num;part.total=$total_parts")
        done

        echo "Total size of parts: $(awk -v size=$total_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"

        # Verify total size matches original
        if [ $total_size -ne $original_size ]; then
            echo "ERROR: Size mismatch!"
            echo "Original file size: $(awk -v size=$original_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            echo "Sum of parts size: $(awk -v size=$total_size 'BEGIN {printf "%.2f GB", size/1024/1024/1024}')"
            echo "Difference: $(awk -v orig=$original_size -v total=$total_size 'BEGIN {printf "%.2f GB", (orig-total)/1024/1024/1024}')"
            exit 1
        fi

        # Add remaining files
        if [ -f "config.json" ]; then
            files+=("config.json:application/vnd.oci.image.config.v1+json")
        fi

        if [ -f "nvram.bin" ]; then
            files+=("nvram.bin:application/octet-stream")
        fi

        # Push versions in parallel
        push_pids=()
        for version in $image_versions; do
            (
                echo "Pushing version $version..."
                oras push --disable-path-validation \
                    "ghcr.io/$organization/$image_name:$version" \
                    "${files[@]}"
                echo "Completed push for version $version"
            ) &
            push_pids+=($!)
        done

        # Wait for all pushes to complete
        for pid in "${push_pids[@]}"; do
            wait "$pid"
        done
    else
        # Push disk.img directly if it's small enough
        echo "Copying disk image..."
        pv "$disk_img" > disk.img

        # Push all files together
        echo "Pushing all files..."
        files=("disk.img:application/vnd.oci.image.layer.v1.tar")

        if [ -f "config.json" ]; then
            files+=("config.json:application/vnd.oci.image.config.v1+json")
        fi

        if [ -f "nvram.bin" ]; then
            files+=("nvram.bin:application/octet-stream")
        fi

        for version in $image_versions; do
            # Push all files in one command
            oras push --disable-path-validation \
                "ghcr.io/$organization/$image_name:$version" \
                "${files[@]}"
        done
    fi
fi

for version in $image_versions; do
    echo "Upload complete: ghcr.io/$organization/$image_name:$version"
done
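Again going by the --help text, a sketch of how this removed publish script would have been invoked, with placeholder names throughout (a single version sidesteps any ambiguity in how the script splits the comma-separated version list, since it iterates with unquoted word splitting):

    export GITHUB_TOKEN=placeholder   # a token able to push packages to ghcr.io
    ./the-publish-script.sh \
        --organization my-org \
        --folder-path ~/vms/my-vm-image \
        --image-name my-vm-image \
        --image-versions "0.1.0" \
        --chunk-size 500M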