Add Lumier

This commit is contained in:
f-trycua
2025-04-27 22:43:34 -07:00
parent cd40bba74f
commit 967a732bba
17 changed files with 1355 additions and 2 deletions

8
.gitignore vendored
View File

@@ -15,7 +15,8 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib/*
!libs/lumier/src/lib/
lib64/
parts/
sdist/
@@ -242,4 +243,7 @@ trajectories/
.storage/
# Gradio settings
.gradio_settings.json
.gradio_settings.json
# Lumier Storage
storage/

30
.vscode/lumier.code-workspace vendored Normal file
View File

@@ -0,0 +1,30 @@
{
"folders": [
{
"name": "lumier",
"path": "../libs/lumier"
},
{
"name": "lume",
"path": "../libs/lume"
}
],
"settings": {
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true
}
},
"tasks": {
"version": "2.0.0",
"tasks": [
]
},
"launch": {
"configurations": [
]
}
}

24
libs/lumier/.dockerignore Normal file
View File

@@ -0,0 +1,24 @@
# Ignore macOS system files and trash
.DS_Store
.Trashes
**/.Trashes
**/.*
# Ignore Python cache
__pycache__/
*.pyc
*.pyo
# Ignore virtual environments
.venv/
venv/
# Ignore editor/project files
.vscode/
.idea/
*.swp
# Ignore test artifacts
test-results/
# Ignore anything else you don't want in the Docker build context

74
libs/lumier/Dockerfile Normal file
View File

@@ -0,0 +1,74 @@
# Base image using Debian for arm64 architecture (optimized for Apple Silicon)
FROM debian:bullseye-slim AS lumier-base

# Set environment variables for Lume API server configuration
ENV LUME_API_HOST="host.docker.internal"
ENV LUME_API_PORT="8080"

# Default VM configuration (can be overridden at runtime)
ENV VERSION="ghcr.io/trycua/macos-sequoia-vanilla:latest"
ENV RAM_SIZE="8192"
ENV CPU_CORES="4"
ENV DISK_SIZE="100"
ENV DISPLAY="1024x768"
ENV VM_NAME="lumier"
ENV HOST_DATA_PATH=""
ENV LUMIER_DEBUG="0"

# Install necessary tools and noVNC dependencies
RUN apt-get update && \
    apt-get install -y \
    netcat-traditional \
    curl \
    sshpass \
    wget \
    unzip \
    git \
    python3 \
    python3-pip \
    python3-numpy \
    procps && \
    rm -rf /var/lib/apt/lists/*

# Cache-busting knob. This must be a build ARG, not an ENV: the lumier CLI
# runs `docker build --build-arg CACHEBUST=$(date +%s)`, and --build-arg can
# only override a declared ARG. With ENV the value stayed "1" forever and the
# noVNC download below was never re-fetched.
ARG CACHEBUST=1

# Download and install noVNC without caching
RUN wget https://github.com/trycua/noVNC/archive/refs/heads/master.zip -O master1.zip && \
    unzip master1.zip && \
    mv noVNC-master /opt/noVNC && \
    rm master1.zip

# Set environment variables for noVNC
ENV NOVNC_PATH="/opt/noVNC"

# Create directory structure
RUN mkdir -p /run/bin /run/lib /run/config /run/hooks

# Copy scripts to the container.
# The same client shim answers as both `lume` and `sshpass` inside the
# container; it forwards the invoked command over the tunnel to the host.
COPY src/bin/tunnel.sh /run/bin/
COPY src/bin/tunnel-script.sh /usr/local/bin/lume
COPY src/bin/tunnel-script.sh /usr/local/bin/sshpass
COPY src/config/constants.sh /run/config/
COPY src/bin/entry.sh /run/bin/entry.sh

# Copy library files if they exist
COPY src/lib/ /run/lib/
COPY src/hooks/ /run/hooks/

# Make scripts executable
RUN chmod +x /usr/local/bin/lume \
    /usr/local/bin/sshpass \
    /run/bin/* \
    /run/hooks/* 2>/dev/null || true

# Expose ports for noVNC and Lume API
EXPOSE 8080
EXPOSE 8006

# VOLUME setup
VOLUME [ "/storage" ]
VOLUME [ "/data" ]

# Default entrypoint
ENTRYPOINT ["/run/bin/entry.sh"]

175
libs/lumier/README.md Normal file
View File

@@ -0,0 +1,175 @@
<div align="center">
<h1>
<div class="image-wrapper" style="display: inline-block;">
<picture>
<source media="(prefers-color-scheme: dark)" alt="logo" height="150" srcset="../../img/logo_white.png" style="display: block; margin: auto;">
<source media="(prefers-color-scheme: light)" alt="logo" height="150" srcset="../../img/logo_black.png" style="display: block; margin: auto;">
<img alt="Lumier logo">
</picture>
</div>
[![Swift 6](https://img.shields.io/badge/Swift_6-F54A2A?logo=swift&logoColor=white&labelColor=F54A2A)](#)
[![macOS](https://img.shields.io/badge/macOS-000000?logo=apple&logoColor=F0F0F0)](#)
[![Homebrew](https://img.shields.io/badge/Homebrew-FBB040?logo=homebrew&logoColor=fff)](#install)
[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?&logo=discord&logoColor=white)](https://discord.com/invite/mVnXXpdE85)
</h1>
</div>
**Lumier** provides a Docker-based interface for the `lume` CLI, allowing you to easily run macOS virtual machines inside a container with VNC access. It creates a secure tunnel to execute lume commands on your host machine while providing a containerized environment for your applications.
## Requirements
Before using Lumier, make sure you have:
1. Install [lume](https://github.com/trycua/cua/blob/main/libs/lume/README.md) on your host machine
2. Docker installed on your host machine
3. `socat` installed for the tunnel (install with Homebrew: `brew install socat`)
## Installation
You can use Lumier directly from its directory or install it to your system:
```bash
# Option 1: Install to your user's bin directory (recommended)
./install.sh
# Option 2: Install to a custom directory
./install.sh --install-dir=/usr/local/bin # May require sudo
# Option 3: View installation options
./install.sh --help
```
After installation, you can run `lumier` from anywhere in your terminal.
If you get a "command not found" error, make sure the installation directory is in your PATH. The installer will warn you if it isn't and provide instructions to add it.
## Usage
There are two ways to use Lumier: with the provided script or directly with Docker.
### Option 1: Using the Lumier Script
Lumier provides a simple CLI interface to manage VMs in Docker with full Docker compatibility:
```bash
# Show help and available commands
lumier help
# Start the tunnel to connect to lume
lumier tunnel start
# Check if the tunnel is running
lumier tunnel status
# Stop the tunnel
lumier tunnel stop
# Build the Docker image (optional, happens automatically on first run)
lumier build
# Run a VM with default settings
lumier run -it --rm
# Run a VM with custom settings using Docker's -e flag
lumier run -it --rm \
--name lumier-vm \
-p 8006:8006 \
-v $(pwd)/storage:/storage \
-v $(pwd)/shared:/data \
-e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \
-e CPU_CORES=4 \
-e RAM_SIZE=8192
# Note:
# The lumier script now automatically detects the real host paths for ./storage and ./shared
# and passes them to the container as HOST_STORAGE_PATH and HOST_DATA_PATH.
# You do NOT need to specify these environment variables manually.
# The VM name is always set from the container name.
```
### Option 2: Using Docker Directly
You can also use Docker commands directly without the lumier utility:
```bash
# 1. Start the tunnel manually
cd libs/lumier
socat TCP-LISTEN:8080,reuseaddr,fork EXEC:"$PWD/src/bin/tunnel.sh" &
TUNNEL_PID=$!
# 2. Build the Docker image
docker build -t lumier:latest .
# 3. Run the container
docker run -it --rm \
--name lumier-vm \
-p 8006:8006 \
-v $(pwd)/storage:/storage \
-v $(pwd)/shared:/data \
-e VM_NAME=lumier-vm \
-e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \
-e CPU_CORES=4 \
-e RAM_SIZE=8192 \
-e HOST_STORAGE_PATH=$(pwd)/storage \
-e HOST_DATA_PATH=$(pwd)/shared \
lumier:latest
# 4. Stop the tunnel when you're done
kill $TUNNEL_PID
# Alternatively, find and kill the tunnel process
# First, find the process
lsof -i TCP:8080
# Then kill it by PID
kill <PID>
```
Note that when using Docker directly, you're responsible for:
- Starting and managing the tunnel
- Building the Docker image
- Providing the correct environment variables
## Available Environment Variables
These variables can be set using Docker's `-e` flag:
- `VM_NAME`: Set the VM name (default: lumier)
- `VERSION`: Set the VM image (default: ghcr.io/trycua/macos-sequoia-vanilla:latest)
- `CPU_CORES`: Set the number of CPU cores (default: 4)
- `RAM_SIZE`: Set the memory size in MB (default: 8192)
- `DISPLAY`: Set the display resolution (default: 1024x768)
- `HOST_DATA_PATH`: Path on the host to share with the VM
- `LUMIER_DEBUG`: Enable debug mode (set to 1)
## Project Structure
The project is organized as follows:
```
lumier/
├── Dockerfile # Main Docker image definition
├── README.md # This file
├── lumier # Main CLI script
├── install.sh # Installation script
├── src/ # Source code
│ ├── bin/ # Executable scripts
│ │ ├── entry.sh # Docker entrypoint
│ │ ├── server.sh # Tunnel server manager
│ │ └── tunnel.sh # Tunnel request handler
│ ├── config/ # Configuration
│ │ └── constants.sh # Shared constants
│ ├── hooks/ # Lifecycle hooks
│ │ └── on-logon.sh # Run after VM boots
│ └── lib/ # Shared library code
│ ├── utils.sh # Utility functions
│ └── vm.sh # VM management functions
└── mount/ # Default shared directory
```
## VNC Access
When a VM is running, you can access it via VNC through:
http://localhost:8006/vnc.html
The password is displayed in the console output when the VM starts.

176
libs/lumier/install.sh Executable file
View File

@@ -0,0 +1,176 @@
#!/bin/bash
set -e

# Lumier Installer
# This script installs Lumier to your system

# Define colors for output. Guard every tput call: tput fails when TERM is
# unset or "dumb" (e.g. CI pipelines, cron), and under `set -e` an unguarded
# failure would abort the installer before it printed anything useful.
BOLD=$(tput bold 2>/dev/null || true)
NORMAL=$(tput sgr0 2>/dev/null || true)
RED=$(tput setaf 1 2>/dev/null || true)
GREEN=$(tput setaf 2 2>/dev/null || true)
BLUE=$(tput setaf 4 2>/dev/null || true)
YELLOW=$(tput setaf 3 2>/dev/null || true)

# Default installation directory (user-specific, doesn't require sudo)
DEFAULT_INSTALL_DIR="$HOME/.local/bin"
INSTALL_DIR="${INSTALL_DIR:-$DEFAULT_INSTALL_DIR}"

# Directory containing this script (and the bundled `lumier` CLI)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Parse command line arguments
while [ "$#" -gt 0 ]; do
    case "$1" in
        --install-dir=*)
            INSTALL_DIR="${1#*=}"
            ;;
        --help)
            echo "${BOLD}${BLUE}Lumier Installer${NORMAL}"
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --install-dir=DIR   Install to the specified directory (default: $DEFAULT_INSTALL_DIR)"
            echo "  --help              Display this help message"
            echo ""
            echo "Examples:"
            echo "  $0                                # Install to $DEFAULT_INSTALL_DIR"
            echo "  $0 --install-dir=/usr/local/bin   # Install to system directory (may require root privileges)"
            echo "  INSTALL_DIR=/opt/lumier $0        # Install to /opt/lumier (legacy env var support)"
            exit 0
            ;;
        *)
            echo "${RED}Unknown option: $1${NORMAL}"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
    shift
done
echo "${BOLD}${BLUE}Lumier Installer${NORMAL}"
echo "This script will install Lumier to your system."
# Check if we're running with appropriate permissions
check_permissions() {
    # Warn (and possibly abort) when the global INSTALL_DIR points at a system
    # location the current user cannot write to. Does not escalate; it only
    # explains how the user can fix permissions themselves.
    # System directories that typically require root privileges
    SYSTEM_DIRS=("/usr/local/bin" "/usr/bin" "/bin" "/opt")
    NEEDS_ROOT=false
    for DIR in "${SYSTEM_DIRS[@]}"; do
        # Prefix match: /usr/local/bin/foo counts as inside /usr/local/bin
        if [[ "$INSTALL_DIR" == "$DIR"* ]] && [ ! -w "$INSTALL_DIR" ]; then
            NEEDS_ROOT=true
            break
        fi
    done
    if [ "$NEEDS_ROOT" = true ]; then
        echo "${YELLOW}Warning: Installing to $INSTALL_DIR may require root privileges.${NORMAL}"
        echo "Consider these alternatives:"
        echo "  • Install to a user-writable location: $0 --install-dir=$HOME/.local/bin"
        echo "  • Create the directory with correct permissions first:"
        echo "    sudo mkdir -p $INSTALL_DIR && sudo chown $(whoami) $INSTALL_DIR"
        echo ""
        # Check if we already have write permission (might have been set up previously);
        # the parent dir is checked too since mkdir -p may create INSTALL_DIR later.
        if [ ! -w "$INSTALL_DIR" ] && [ ! -w "$(dirname "$INSTALL_DIR")" ]; then
            echo "${RED}Error: You don't have write permission to $INSTALL_DIR${NORMAL}"
            echo "Please choose a different installation directory or ensure you have the proper permissions."
            exit 1
        fi
    fi
}
# Detect OS and architecture
detect_platform() {
    # Lumier only runs on macOS (darwin) with Apple Silicon (arm64);
    # abort with an explanatory message anywhere else.
    OS=$(uname -s | tr '[:upper:]' '[:lower:]')
    ARCH=$(uname -m)
    case "$OS" in
        darwin) ;;
        *)
            echo "${RED}Error: Currently only macOS is supported.${NORMAL}"
            exit 1
            ;;
    esac
    case "$ARCH" in
        arm64) ;;
        *)
            echo "${RED}Error: Lumier only supports macOS on Apple Silicon (ARM64).${NORMAL}"
            exit 1
            ;;
    esac
    PLATFORM="darwin-arm64"
    echo "Detected platform: ${BOLD}$PLATFORM${NORMAL}"
}
# Check dependencies
check_dependencies() {
    # Verify host prerequisites: lume (hard requirement), socat
    # (auto-installed via Homebrew if missing), and Docker (warning only —
    # the install itself can proceed without it).
    echo "Checking dependencies..."
    # Check if lume is installed
    if ! command -v lume &> /dev/null; then
        echo "${RED}Error: Lume is required but not installed.${NORMAL}"
        echo "Please install Lume first: https://github.com/trycua/cua/blob/main/libs/lume/README.md"
        exit 1
    fi
    # Check if socat is installed
    if ! command -v socat &> /dev/null; then
        echo "${YELLOW}Warning: socat is required but not installed.${NORMAL}"
        echo "Installing socat with Homebrew..."
        # Check if Homebrew is installed
        if ! command -v brew &> /dev/null; then
            echo "${RED}Error: Homebrew is required to install socat.${NORMAL}"
            echo "Please install Homebrew first: https://brew.sh/"
            echo "Or install socat manually, then run this script again."
            exit 1
        fi
        # Install socat (aborts the script on failure because of set -e)
        brew install socat
    fi
    # Check if Docker is installed — deliberately non-fatal
    if ! command -v docker &> /dev/null; then
        echo "${YELLOW}Warning: Docker is required but not installed.${NORMAL}"
        echo "Please install Docker: https://docs.docker.com/get-docker/"
        echo "Continuing with installation, but Lumier will not work without Docker."
    fi
    echo "${GREEN}All dependencies are satisfied.${NORMAL}"
}
# Copy the lumier script directly
copy_lumier() {
    # Install the CLI entry point: copy the bundled `lumier` script from
    # SCRIPT_DIR into INSTALL_DIR and mark it executable.
    local target="$INSTALL_DIR/lumier"
    echo "Copying lumier script to $INSTALL_DIR..."
    cp "$SCRIPT_DIR/lumier" "$target"
    chmod +x "$target"
}
# Main installation flow
main() {
    # Orchestrate the install: permission check -> platform gate ->
    # dependency check -> copy the CLI -> PATH advice.
    check_permissions
    detect_platform
    check_dependencies
    echo "Installing Lumier to $INSTALL_DIR..."
    # Create install directory if it doesn't exist
    mkdir -p "$INSTALL_DIR"
    # Copy the lumier script
    copy_lumier
    echo "${GREEN}Installation complete!${NORMAL}"
    echo "Lumier has been installed to ${BOLD}$INSTALL_DIR/lumier${NORMAL}"
    # Check if the installation directory is in PATH
    # (wrapping both sides in colons makes the substring test exact per entry)
    if [[ ":$PATH:" != *":$INSTALL_DIR:"* ]]; then
        echo "${YELLOW}Warning: $INSTALL_DIR is not in your PATH.${NORMAL}"
        echo "To add it, run one of these commands based on your shell:"
        echo "  For bash: echo 'export PATH=\"\$PATH:$INSTALL_DIR\"' >> ~/.bash_profile"
        echo "  For zsh:  echo 'export PATH=\"\$PATH:$INSTALL_DIR\"' >> ~/.zshrc"
        echo "  For fish: echo 'fish_add_path $INSTALL_DIR' >> ~/.config/fish/config.fish"
    fi
}

# Run the installation
main

200
libs/lumier/lumier Executable file
View File

@@ -0,0 +1,200 @@
#!/usr/bin/env bash
# Lumier CLI: host-side wrapper that builds/runs the Lumier Docker image and
# manages the socat tunnel that lets the container call `lume` on the host.
# Exit on errors, undefined variables, and propagate errors in pipes
set -eo pipefail

# Always use the current working directory as the build context.
# NOTE(review): this assumes the CLI is invoked from libs/lumier (where the
# Dockerfile and src/ live); an installed `lumier` run from elsewhere would
# not find src/bin/server.sh — confirm intended usage.
SCRIPT_DIR="$(pwd)"
PORT=8080
DEBUG=${LUMIER_DEBUG:-0}

# Print CLI usage. The heredoc delimiter is unquoted, so $(basename "$0")
# and $(pwd) expand at call time.
usage() {
cat <<EOF
Lumier - Docker container wrapper for lume Virtual Machines
Usage: $(basename "$0") COMMAND [OPTIONS]
Commands:
run [DOCKER_ARGS] Build (if needed) and run the Lumier container with Docker args
tunnel start Start the Lumier tunnel
tunnel stop Stop the Lumier tunnel
tunnel status Check the status of the Lumier tunnel
build [DOCKER_ARGS] Build the Lumier Docker image with optional Docker args
help Show this help message
Docker Container Environment Variables:
These can be set using Docker's -e flag:
VM_NAME Set the VM name (default: lumier)
VERSION Set the VM image (default: ghcr.io/trycua/macos-sequoia-vanilla:latest)
CPU_CORES Set the number of CPU cores (default: 4)
RAM_SIZE Set the memory size in MB (default: 8192)
HOST_DATA_PATH Path to mount as shared directory in the VM
LUMIER_DEBUG Enable debug mode (set to 1)
Script Environment Variables:
LUMIER_IMAGE Docker image name (default: lumier:latest)
Examples:
# Run a VM with default settings
$(basename "$0") run -it --rm
# Run a VM with custom settings using Docker's -e flag
$(basename "$0") run -it --rm \\
--name custom-container-name \\
-e VM_NAME=my-vm \\
-e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \\
-e RAM_SIZE=16384 \\
-v $(pwd)/mount:/data
# Build with a custom image name
LUMIER_IMAGE=myorg/lumier:v1 $(basename "$0") build
EOF
}
# Check if the tunnel is active
is_tunnel_active() {
    # A process listening on $PORT means the socat tunnel is up;
    # grep's exit status doubles as this function's return value.
    lsof -i TCP:"$PORT" 2>/dev/null | grep -q LISTEN
}
# Start the tunnel if needed
ensure_tunnel() {
    # Idempotently bring up the host-side socat tunnel (src/bin/server.sh)
    # that the container uses to reach `lume` on the host.
    if ! is_tunnel_active; then
        echo "Tunnel is not active. Starting tunnel..."
        "$SCRIPT_DIR/src/bin/server.sh" start
        sleep 2 # Wait for the tunnel to start
        if ! is_tunnel_active; then
            echo "Failed to start tunnel. Make sure 'lume' is installed on your host."
            exit 1
        fi
    else
        echo "Tunnel is already active."
    fi
}
# Build the Docker image with cache busting
build_image() {
    # Build the Lumier image from $SCRIPT_DIR; image name comes from
    # LUMIER_IMAGE (default lumier:latest). Extra args go to docker build.
    local image_name="${LUMIER_IMAGE:-lumier:latest}"
    echo "Building Lumier Docker image: $image_name"
    echo "SCRIPT_DIR=$SCRIPT_DIR"
    echo "Checking for Dockerfile at: $SCRIPT_DIR/Dockerfile"
    ls -l "$SCRIPT_DIR/Dockerfile" || echo "Dockerfile not found at $SCRIPT_DIR/Dockerfile"
    # Pass any additional arguments to docker build with cache busting.
    # NOTE(review): --build-arg CACHEBUST only invalidates the cache if the
    # Dockerfile declares `ARG CACHEBUST`; with a plain ENV it is silently
    # ignored — verify the Dockerfile matches.
    docker build --build-arg CACHEBUST=$(date +%s) -t "$image_name" "$SCRIPT_DIR" "$@"
    echo "Lumier image built successfully: $image_name"
}
# Run the Docker container
run_container() {
    # Build the image if missing, ensure the tunnel, then `docker run` with
    # auto-injected environment (host storage/data paths, VM name).
    local image_name="${LUMIER_IMAGE:-lumier:latest}"

    # Ensure the Docker image exists
    if ! docker image inspect "$image_name" &>/dev/null; then
        echo "Docker image '$image_name' not found. Building it..."
        build_image
    fi

    # Ensure the tunnel is running
    ensure_tunnel

    # Automatically resolve and pass host paths for storage and data.
    # Fall back to $PWD-relative paths when the directories do not exist yet:
    # `realpath` fails on a missing path, and under `set -e` that would
    # abort the whole script.
    STORAGE_PATH="${HOST_STORAGE_PATH:-$(realpath ./storage 2>/dev/null || echo "$PWD/storage")}"
    DATA_PATH="${HOST_DATA_PATH:-$(realpath ./shared 2>/dev/null || echo "$PWD/shared")}"

    DOCKER_ARGS=( )

    # Append `-e VAR=val` unless the caller already supplied VAR via -e.
    # Bugfix: the previous version iterated over its OWN two arguments
    # instead of the caller's docker args, so duplicates were never detected;
    # the caller's args are now passed in explicitly after var/val.
    add_env_var() {
        local var="$1" val="$2" arg
        shift 2
        for arg in "$@"; do
            [[ "$arg" == *"$var="* ]] && return 0
        done
        DOCKER_ARGS+=( -e "$var=$val" )
    }
    add_env_var HOST_STORAGE_PATH "$STORAGE_PATH" "$@"
    add_env_var HOST_DATA_PATH "$DATA_PATH" "$@"

    # Detect --name argument so VM_NAME can default to the container name.
    local container_name=""
    local prev_arg=""
    for arg in "$@"; do
        if [[ "$prev_arg" == "--name" ]]; then
            container_name="$arg"
            break
        elif [[ "$arg" == --name=* ]]; then
            container_name="${arg#--name=}"
            break
        fi
        prev_arg="$arg"
    done

    # Only add -e VM_NAME if the caller did not set it, either as the
    # argument following a standalone `-e` or glued on as `-eVM_NAME=...`.
    # Bugfix: the previous version compared against "$2" (the function's
    # second argument) instead of the arg that follows each `-e`.
    local vm_name_set=false
    prev_arg=""
    for arg in "$@"; do
        if [[ "$prev_arg" == "-e" && "$arg" == VM_NAME=* ]]; then
            vm_name_set=true
            break
        elif [[ "$arg" == -e*VM_NAME=* ]]; then
            vm_name_set=true
            break
        fi
        prev_arg="$arg"
    done
    if [[ -n "$container_name" && "$vm_name_set" != true ]]; then
        DOCKER_ARGS+=( -e "VM_NAME=$container_name" )
    fi

    echo "Running Lumier container with image: $image_name"
    # Publish the noVNC port unless the caller already mapped 8006.
    if [[ "$*" == *"-p 8006:8006"* || "$*" == *"-p"*"8006:8006"* ]]; then
        docker run "${DOCKER_ARGS[@]}" "$@" "$image_name"
    else
        docker run "${DOCKER_ARGS[@]}" -p 8006:8006 "$@" "$image_name"
    fi
}
# Main command handling
# Dispatch on the first CLI argument; defaults to `help` when absent.
case "${1:-help}" in
    run)
        shift
        # Build (if needed), ensure the tunnel, then docker-run the container.
        run_container "$@"
        ;;
    tunnel)
        # Handle tunnel subcommands by delegating to the server manager.
        case "${2:-}" in
            start)
                "$SCRIPT_DIR/src/bin/server.sh" start
                ;;
            stop)
                "$SCRIPT_DIR/src/bin/server.sh" stop
                ;;
            status)
                "$SCRIPT_DIR/src/bin/server.sh" status
                ;;
            *)
                echo "Unknown tunnel subcommand: $2"
                usage
                exit 1
                ;;
        esac
        ;;
    build)
        shift
        build_image "$@"
        ;;
    help)
        usage
        ;;
    *)
        echo "Unknown command: $1"
        usage
        exit 1
        ;;
esac

View File

@@ -0,0 +1,10 @@
from flask import Flask

app = Flask(__name__)


def hello_world():
    """Return a fixed greeting proving the VM's web server is reachable."""
    return 'Hello, World, from VM!'


# Register the route explicitly; equivalent to the @app.route('/') decorator
# (the endpoint name matches the function name, as the decorator would use).
app.add_url_rule('/', 'hello_world', hello_world)

if __name__ == '__main__':
    # Bind to all interfaces so the host can reach the server inside the VM.
    app.run(debug=True, host="0.0.0.0", port=5001)

9
libs/lumier/mount/setup.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Example guest-side setup script: sourced inside the VM by on-logon.sh.
# Drops a marker file on the Desktop so users can see the hook ran.
echo "Creating helloworld.txt on the Desktop..."
if [ ! -f ~/Desktop/helloworld.txt ]; then
    echo "Hello, World!" > ~/Desktop/helloworld.txt
    echo "helloworld.txt created successfully."
else
    # Idempotent: a second logon leaves the existing file untouched.
    echo "helloworld.txt already exists."
fi

98
libs/lumier/src/bin/entry.sh Executable file
View File

@@ -0,0 +1,98 @@
#!/usr/bin/env bash
# Container entrypoint: boots the macOS VM via the host tunnel, starts the
# noVNC proxy, runs lifecycle hooks, then blocks until the container stops.
# Exit on errors, undefined variables, and propagate errors in pipes
set -euo pipefail

# Source configuration files
CONFIG_DIR="/run/config"
LIB_DIR="/run/lib"

# Source constants if available
if [ -f "${CONFIG_DIR}/constants.sh" ]; then
    source "${CONFIG_DIR}/constants.sh"
fi

# Import utilities (utils.sh and vm.sh provide wait_for_ssh, start_vm, stop_vm)
for lib in "${LIB_DIR}"/*.sh; do
    if [ -f "$lib" ]; then
        source "$lib"
    fi
done

# Set VM_NAME to env or fallback to container name (from --name);
# inside a container /etc/hostname holds the container's hostname.
if [ -z "${VM_NAME:-}" ]; then
    VM_NAME="$(cat /etc/hostname)"
    export VM_NAME
fi

# Set HOST_STORAGE_PATH to /storage/$VM_NAME if not set
if [ -z "${HOST_STORAGE_PATH:-}" ]; then
    HOST_STORAGE_PATH="/storage/$VM_NAME"
    export HOST_STORAGE_PATH
fi

# Optionally check for mountpoints (informational only)
if mountpoint -q /storage; then
    echo "/storage is mounted"
fi
if mountpoint -q /data; then
    echo "/data is mounted"
fi

# Log startup info
echo "Lumier VM is starting..."

# Cleanup function to ensure VM and noVNC proxy shutdown on container stop
cleanup() {
    set +e # Don't exit on error in cleanup
    echo "[cleanup] Caught signal, shutting down..."
    echo "[cleanup] Stopping VM..."
    stop_vm
    # Now gently stop noVNC proxy if running
    # if [ -n "${NOVNC_PID:-}" ] && kill -0 "$NOVNC_PID" 2>/dev/null; then
    #     echo "[cleanup] Stopping noVNC proxy (PID $NOVNC_PID)..."
    #     kill -TERM "$NOVNC_PID"
    #     # Wait up to 5s for noVNC to exit
    #     for i in {1..5}; do
    #         if ! kill -0 "$NOVNC_PID" 2>/dev/null; then
    #             echo "[cleanup] noVNC proxy stopped."
    #             break
    #         fi
    #         sleep 1
    #     done
    #     # Escalate if still running
    #     if kill -0 "$NOVNC_PID" 2>/dev/null; then
    #         echo "[cleanup] noVNC proxy did not exit, killing..."
    #         kill -KILL "$NOVNC_PID" 2>/dev/null
    #     fi
    # fi
    echo "[cleanup] Done. Exiting."
    exit 0
}
trap cleanup SIGTERM SIGINT

# Start the VM (defined in vm.sh; exports VNC_PORT / VNC_PASSWORD on success)
start_vm

# Start noVNC for VNC access. The VM's VNC endpoint lives on the host
# (lume runs there), hence host.docker.internal.
NOVNC_PID=""
if [ -n "${VNC_PORT:-}" ] && [ -n "${VNC_PASSWORD:-}" ]; then
    echo "Starting noVNC proxy with optimized color settings..."
    ${NOVNC_PATH}/utils/novnc_proxy --vnc host.docker.internal:${VNC_PORT} --listen 8006 --web ${NOVNC_PATH} > /dev/null 2>&1 &
    NOVNC_PID=$!
    disown $NOVNC_PID
    echo "noVNC interface available at: http://localhost:8006/vnc.html?password=${VNC_PASSWORD}&autoconnect=true&logging=debug"
fi

# Run any post-startup hooks (executable files copied into /run/hooks)
if [ -d "/run/hooks" ]; then
    for hook in /run/hooks/*; do
        if [ -x "$hook" ]; then
            echo "Running hook: $(basename "$hook")"
            "$hook"
        fi
    done
fi

echo "Lumier is running. Press Ctrl+C to stop."
# Keep PID 1 alive so the container stays up; signals hit the trap above.
tail -f /dev/null

99
libs/lumier/src/bin/server.sh Executable file
View File

@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# Host-side tunnel manager: starts/stops/queries the socat listener that
# forwards container requests (from tunnel-script.sh) to tunnel.sh.
# Exit on errors, undefined variables, and propagate errors in pipes
set -euo pipefail

# Source constants if available
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "${SCRIPT_DIR}/../config/constants.sh" ]; then
    source "${SCRIPT_DIR}/../config/constants.sh"
fi

# Use the tunnel port from constants if available, otherwise default to 8080
PORT="${TUNNEL_PORT:-8080}"
TUNNEL_SCRIPT="${SCRIPT_DIR}/tunnel.sh"
# Function to check if the tunnel is active
is_tunnel_active() {
    # The tunnel counts as active when something is listening on $PORT;
    # grep's exit status doubles as this function's return value.
    lsof -i TCP:"$PORT" 2>/dev/null | grep -q LISTEN
}
# Function to start the tunnel
start_tunnel() {
    # Launch socat in the background: each incoming TCP connection on $PORT
    # forks an instance of tunnel.sh that services one HTTP request.
    echo "Starting tunnel on port $PORT..."
    if is_tunnel_active; then
        echo "Tunnel is already running on port $PORT."
        return 0
    fi
    # Start socat in the background
    socat TCP-LISTEN:$PORT,reuseaddr,fork EXEC:"$TUNNEL_SCRIPT" &
    SOCAT_PID=$!
    # Check if the tunnel started successfully
    sleep 1
    if ! is_tunnel_active; then
        echo "Failed to start tunnel on port $PORT."
        return 1
    fi
    echo "Tunnel started successfully on port $PORT (PID: $SOCAT_PID)."
    return 0
}
# Function to stop the tunnel
stop_tunnel() {
    # Stop the socat listener(s) that own $PORT; no-op when nothing is
    # listening.
    echo "Stopping tunnel on port $PORT..."
    if ! is_tunnel_active; then
        echo "No tunnel running on port $PORT."
        return 0
    fi
    # Find the listener PID(s). Declaration and assignment are split so
    # `local` does not mask the pipeline's exit status, and duplicate rows
    # from lsof are collapsed with sort -u.
    local pid
    pid=$(lsof -i TCP:"$PORT" | grep LISTEN | awk '{print $2}' | sort -u)
    if [ -n "$pid" ]; then
        # Intentionally unquoted: $pid may hold several whitespace-separated
        # PIDs (the forked socat children).
        kill $pid
        echo "Tunnel stopped (PID: $pid)."
        return 0
    else
        echo "Failed to find process using port $PORT."
        return 1
    fi
}
# Function to check tunnel status
status_tunnel() {
    # Report whether a listener owns $PORT; prints the PID when active.
    # Guard clause first so the happy path reads straight down.
    if ! is_tunnel_active; then
        echo "No tunnel running on port $PORT."
        return 1
    fi
    local pid
    pid=$(lsof -i TCP:$PORT | grep LISTEN | awk '{print $2}')
    echo "Tunnel is active on port $PORT (PID: $pid)."
    return 0
}
# Parse command line arguments
# Dispatch on the first argument; anything unrecognized prints usage.
case "${1:-}" in
    start)
        start_tunnel
        ;;
    stop)
        stop_tunnel
        ;;
    restart)
        # Best-effort restart: stop is a no-op when nothing is running.
        stop_tunnel
        start_tunnel
        ;;
    status)
        status_tunnel
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1
        ;;
esac

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Container-side shim, installed as both /usr/local/bin/lume and
# /usr/local/bin/sshpass (see Dockerfile). It serializes the invoked command
# plus any piped stdin into a plain-text payload and POSTs it to the host
# tunnel, which executes the real binary and streams the output back.

# Source constants if running in container context
if [ -f "/run/config/constants.sh" ]; then
    source "/run/config/constants.sh"
fi

# Define server address with fallback
SERVER="${TUNNEL_HOST:-host.docker.internal}:${TUNNEL_PORT:-8080}"

# Extract the base name of the command and arguments.
# $0 is the shim's install name, so the payload begins "lume ..." or
# "sshpass ..." — exactly the prefixes tunnel.sh whitelists.
command=$(basename "$0")
subcommand="$1"
shift
# NOTE(review): flattening "$@" into one string drops the original word
# boundaries — arguments containing spaces get re-split on the host. Known
# limitation of the text protocol; confirm acceptable.
args="$@"
command="$command $subcommand $args"

# Concatenate command and any stdin data
full_data="$command"
if [ ! -t 0 ]; then
    stdin_data=$(cat)
    if [ -n "$stdin_data" ]; then
        # Format full_data to include stdin data as an embedded heredoc
        full_data="$full_data << 'EOF'
$stdin_data
EOF"
    fi
fi

# Trim leading/trailing whitespace and newlines.
# NOTE(review): sed is line-oriented, so this trims spaces/tabs on each line
# of the payload, not just the ends of the whole string — verify that is the
# intended behavior.
full_data=$(echo -e "$full_data" | sed 's/^[ \t\n]*//;s/[ \t\n]*$//')

# Log command if debug is enabled
if [ "${LUMIER_DEBUG:-0}" -eq 1 ]; then
    echo "Executing lume command: $full_data" >&2
    echo "Sending to: $SERVER" >&2
fi

# Use curl with -N to disable output buffering and -s for silent mode
curl -N -s -X POST \
    -H "Content-Type: application/octet-stream" \
    --data-binary @- \
    "http://$SERVER" <<< "$full_data"

96
libs/lumier/src/bin/tunnel.sh Executable file
View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# Host-side request handler: socat forks one instance per TCP connection.
# Reads a minimal HTTP POST whose body is a "lume ..." or "sshpass ..."
# command line, executes it, and streams the output back as an HTTP response.
# Exit on errors, undefined variables, and propagate errors in pipes
set -euo pipefail

# Source constants if available
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "${SCRIPT_DIR}/../config/constants.sh" ]; then
    source "${SCRIPT_DIR}/../config/constants.sh"
fi

# Handle errors and cleanup: remove the temp script and FIFO on any exit.
cleanup() {
    local exit_code=$?
    # Clean up any temporary files if they exist
    [ -n "${temp_file:-}" ] && [ -f "$temp_file" ] && rm "$temp_file"
    [ -n "${fifo:-}" ] && [ -p "$fifo" ] && rm "$fifo"
    exit $exit_code
}
trap cleanup EXIT INT TERM

log_debug() {
    if [ "${LUMIER_DEBUG:-0}" -eq 1 ]; then
        echo "[DEBUG] $*" >&2
    fi
}

# Emit a minimal HTTP error response on stdout (the socket) and bail out.
send_error_response() {
    local status_code=$1
    local message=$2
    echo "HTTP/1.1 $status_code"
    echo "Content-Type: text/plain"
    echo ""
    echo "$message"
    exit 1
}

# Read the HTTP request line
read -r request_line
log_debug "Request: $request_line"

# Read headers and look for Content-Length (headers end at a bare CR line)
content_length=0
while IFS= read -r header; do
    [[ $header == $'\r' ]] && break # End of headers
    log_debug "Header: $header"
    if [[ "$header" =~ ^Content-Length:\ ([0-9]+) ]]; then
        content_length="${BASH_REMATCH[1]}"
    fi
done

# Read the body using the content length (dd reads exactly that many bytes)
command=""
if [ "$content_length" -gt 0 ]; then
    command=$(dd bs=1 count="$content_length" 2>/dev/null)
    log_debug "Received command: $command"
fi

# Determine the executable and arguments based on the command.
# Only the two whitelisted prefixes are accepted.
if [[ "$command" == lume* ]]; then
    executable="$(which lume || echo "/usr/local/bin/lume")"
    command_args="${command#lume}" # Remove 'lume' from the command
elif [[ "$command" == sshpass* ]]; then
    executable="$(which sshpass || echo "/usr/local/bin/sshpass")"
    command_args="${command#sshpass}"
else
    send_error_response "400 Bad Request" "Unsupported command: $command"
fi

# Check if executable exists
if [ ! -x "$executable" ]; then
    send_error_response "500 Internal Server Error" "Executable not found or not executable: $executable"
fi

# Create a temporary file to store the command.
# NOTE(review): the request body is written into a script and executed
# verbatim, so anything that can reach this port can run arbitrary shell as
# this user — ensure the listener is never exposed beyond the local host.
temp_file=$(mktemp)
echo "$executable $command_args" > "$temp_file"
chmod +x "$temp_file"

# Create a FIFO (named pipe) for capturing output
fifo=$(mktemp -u)
mkfifo "$fifo"

# Execute the command and pipe its output through awk to ensure line-buffering
{
    log_debug "Executing: $executable $command_args"
    "$temp_file" 2>&1 | awk '{ print; fflush() }' > "$fifo"
} &

# Stream the output from the FIFO as an HTTP response
{
    echo -e "HTTP/1.1 200 OK\r"
    echo -e "Content-Type: text/plain\r"
    echo -e "\r"
    cat "$fifo"
}

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Shared constants, sourced by both the host-side scripts (server.sh,
# tunnel.sh) and the container entrypoint (entry.sh).

# Port configuration
TUNNEL_PORT=8080 # host port the socat tunnel listens on
# NOTE(review): start_vm later overwrites VNC_PORT with the VM's actual VNC
# port before entry.sh uses it — confirm this default is still needed.
VNC_PORT=8006

# Host configuration (how a container reaches the host from Docker Desktop)
TUNNEL_HOST="host.docker.internal"

# Default VM configuration
DEFAULT_RAM_SIZE="8192"
DEFAULT_CPU_CORES="4"
DEFAULT_DISK_SIZE="100"
DEFAULT_VM_NAME="lumier"
DEFAULT_VM_VERSION="ghcr.io/trycua/macos-sequoia-vanilla:latest"

# Paths
NOVNC_PATH="/opt/noVNC"
LIFECYCLE_HOOKS_DIR="/run/hooks"

# VM connection details
# NOTE(review): default guest credentials — presumably baked into the
# published macOS images; verify before relying on them.
HOST_USER="lume"
HOST_PASSWORD="lume"
SSH_RETRY_ATTEMPTS=20
SSH_RETRY_INTERVAL=5

View File

@@ -0,0 +1,8 @@
# Lifecycle hook pushed into the guest VM over SSH by execute_remote_script,
# which exports DATA_FOLDER_PATH beforehand. Runs the user's setup.sh from
# the shared data folder, if present.
setup_script="$DATA_FOLDER_PATH/setup.sh"
if [ -f "$setup_script" ]; then
    chmod +x "$setup_script"
    # Source (rather than execute) so setup.sh shares this shell's environment
    source "$setup_script"
else
    echo "Setup script not found at: $setup_script"
fi

106
libs/lumier/src/lib/utils.sh Executable file
View File

@@ -0,0 +1,106 @@
#!/usr/bin/env bash
# Function to wait for SSH to become available
# Function to wait for SSH to become available.
# Arguments: $1 host ip, $2 user, $3 password, $4 retry interval in seconds
# (default 5), $5 max retries (default 20; 0 retries forever).
# Returns 0 once a login succeeds, 1 when retries are exhausted.
wait_for_ssh() {
    local host_ip=$1
    local user=$2
    local password=$3
    local retry_interval=${4:-5}
    local max_retries=${5:-20}
    local retry_count=0

    echo "Waiting for SSH to become available on $host_ip..."
    while true; do
        # Test the connection directly in the condition instead of inspecting
        # $? afterwards — also keeps a failing ssh from tripping `set -e` in
        # the sourcing script (entry.sh runs with set -euo pipefail).
        if sshpass -p "$password" ssh -o StrictHostKeyChecking=no "$user@$host_ip" "exit"; then
            echo "SSH is ready on $host_ip!"
            return 0
        fi
        # Bugfix: use arithmetic assignment, not ((retry_count++)) — that
        # form returns status 1 when the old value is 0, which aborts the
        # caller under `set -e` on the very first retry.
        retry_count=$((retry_count + 1))
        # Exit if maximum retries are reached (0 means retry indefinitely)
        if [ "$max_retries" -ne 0 ] && [ "$retry_count" -ge "$max_retries" ]; then
            echo "Maximum retries reached. SSH is not available."
            return 1
        fi
        echo "SSH not ready. Retrying in $retry_interval seconds... (Attempt $retry_count)"
        sleep "$retry_interval"
    done
}
# Function to execute a script on a remote server using sshpass
# Function to execute a script on a remote server using sshpass.
# Arguments: $1 host, $2 user, $3 password, $4 local script path,
# $5 VNC password, $6 data folder name (optional).
# The script is sent over SSH with VNC_PASSWORD and DATA_FOLDER_PATH
# exported first so hooks like on-logon.sh can use them.
execute_remote_script() {
    local host="$1"
    local user="$2"
    local password="$3"
    local script_path="$4"
    local vnc_password="$5"
    local data_folder="$6"

    # Check if all required arguments are provided
    if [ -z "$host" ] || [ -z "$user" ] || [ -z "$password" ] || [ -z "$script_path" ] || [ -z "$vnc_password" ]; then
        echo "Usage: execute_remote_script <host> <user> <password> <script_path> <vnc_password> [data_folder]"
        return 1
    fi

    echo "VNC password exported to VM: $vnc_password"
    # NOTE(review): VM_SHARED_FILES_PATH is not defined in constants.sh or
    # anywhere visible here — confirm where it is meant to come from.
    data_folder_path="$VM_SHARED_FILES_PATH/$data_folder"
    echo "Data folder path in VM: $data_folder_path"

    # Build the remote script with REAL newlines. Bugfix: the previous
    # version used literal "\n" escapes inside plain double quotes, which
    # bash never expands, so the shebang and both exports collapsed onto one
    # line that the remote shell parsed as a single '#' comment — the
    # VNC_PASSWORD / DATA_FOLDER_PATH exports were silently dropped.
    local nl=$'\n'
    local script_content="#!/usr/bin/env bash${nl}"
    if [ -n "$data_folder" ]; then
        script_content+="export VNC_PASSWORD='$vnc_password'${nl}"
        script_content+="export DATA_FOLDER_PATH='$data_folder_path'${nl}"
    fi
    script_content+="$(<"$script_path")"

    # Use a here-document to send the script content; check the status
    # directly instead of inspecting $? afterwards.
    if ! sshpass -p "$password" ssh -o StrictHostKeyChecking=no "$user@$host" "bash -s" <<EOF
$script_content
EOF
    then
        echo "Failed to execute script on remote host $host."
        return 1
    fi
}
# Example usage:
#   execute_remote_script "192.168.1.100" "username" "password" "/path/to/script.sh" "vncpass" "data_folder"
extract_json_field() {
    # Pull the string value of a field out of pretty-printed JSON, e.g.
    #   extract_json_field status '{"status" : "running"}'  ->  running
    # Relies on GNU grep's -P (PCRE); \K discards the matched prefix.
    # Prints an empty string when the field is absent.
    local key=$1
    local json=$2
    local value
    if value=$(echo "$json" | grep -oP '"'"$key"'"\s*:\s*"\K[^"]+'); then
        echo "$value"
    else
        echo ""
    fi
}
extract_json_field_from_file() {
    # Read a JSON file and delegate the field lookup to extract_json_field.
    local key=$1 file=$2
    local contents
    contents=$(<"$file")
    extract_json_field "$key" "$contents"
}
extract_json_field_from_text() {
    # Thin alias kept for call-site readability; the payload is already text.
    extract_json_field "$1" "$2"
}

175
libs/lumier/src/lib/vm.sh Executable file
View File

@@ -0,0 +1,175 @@
#!/usr/bin/env bash
start_vm() {
    # Boot the VM via the lume API on the host, wait until it is running and
    # exposes a VNC URL, then wait for SSH and run the on-logon hook.
    # Globals read: VM_NAME, HOST_STORAGE_PATH, VERSION, CPU_CORES, RAM_SIZE,
    #   DISPLAY, HOST_DATA_PATH, HOST_USER, HOST_PASSWORD, DATA_FOLDER.
    # Globals exported: VNC_PORT, VNC_PASSWORD (consumed by entry.sh).

    # Set up dedicated storage for this VM
    STORAGE_NAME="storage_${VM_NAME}"
    if [ -n "$HOST_STORAGE_PATH" ]; then
        lume config storage add "$STORAGE_NAME" "$HOST_STORAGE_PATH" >/dev/null 2>&1 || true
    fi

    # Check if VM exists and its status using JSON format
    VM_INFO=$(lume get "$VM_NAME" --storage "$STORAGE_NAME" -f json 2>&1)
    if [[ $VM_INFO == *"Virtual machine not found"* ]]; then
        # VM does not exist yet: pull the image (strip the registry prefix)
        IMAGE_NAME="${VERSION##*/}"
        lume pull "$IMAGE_NAME" "$VM_NAME" --storage "$STORAGE_NAME"
    else
        # Stop a stale running instance before reconfiguring it
        if [[ $VM_INFO == *'"status" : "running"'* ]]; then
            lume_stop "$VM_NAME" "$STORAGE_NAME"
        fi
    fi

    # Set VM parameters
    lume set "$VM_NAME" --cpu "$CPU_CORES" --memory "${RAM_SIZE}MB" --display "$DISPLAY" --storage "$STORAGE_NAME"

    # Fetch VM configuration (currently unused; kept for debugging)
    CONFIG_JSON=$(lume get "$VM_NAME" --storage "$STORAGE_NAME" -f json)

    # Setup data directory args if necessary
    SHARED_DIR_ARGS=""
    if [ -d "/data" ]; then
        if [ -n "$HOST_DATA_PATH" ]; then
            SHARED_DIR_ARGS="--shared-dir=$HOST_DATA_PATH"
        else
            echo "Warning: /data volume exists but HOST_DATA_PATH is not set. Cannot mount volume."
        fi
    fi

    # Run VM with VNC and shared directory using curl (left in background)
    lume_run $SHARED_DIR_ARGS --storage "$STORAGE_NAME" "$VM_NAME" &

    # Wait for VM to be running and VNC URL to be available (max ~60s)
    vm_ip=""
    vnc_url=""
    max_attempts=30
    attempt=0
    while [ $attempt -lt $max_attempts ]; do
        VM_INFO=$(lume get "$VM_NAME" -f json 2>/dev/null)
        if [[ $VM_INFO == *'"status" : "running"'* ]]; then
            # Extract IP address and VNC URL using helpers from utils.sh
            vm_ip=$(extract_json_field "ipAddress" "$VM_INFO")
            vnc_url=$(extract_json_field "vncUrl" "$VM_INFO")
            if [ -n "$vm_ip" ] && [ -n "$vnc_url" ]; then
                break
            fi
        fi
        sleep 2
        attempt=$((attempt + 1))
    done

    if [ -z "$vm_ip" ] || [ -z "$vnc_url" ]; then
        echo "Timed out waiting for VM to start or VNC URL to become available."
        lume_stop "$VM_NAME" "$STORAGE_NAME" > /dev/null 2>&1
        exit 1
    fi

    # Parse the VNC URL (…:password@host:port) for password and port
    VNC_PASSWORD=$(echo "$vnc_url" | sed -n 's/.*:\(.*\)@.*/\1/p')
    VNC_PORT=$(echo "$vnc_url" | sed -n 's/.*:\([0-9]\+\)$/\1/p')

    # Wait for SSH to become available
    wait_for_ssh "$vm_ip" "$HOST_USER" "$HOST_PASSWORD" 5 20

    # Export VNC variables for entry.sh to use
    export VNC_PORT
    export VNC_PASSWORD

    # Execute on-logon.sh if present. Bugfix: the Dockerfile installs hooks
    # under /run/hooks (LIFECYCLE_HOOKS_DIR in constants.sh); the previous
    # hard-coded /run/lifecycle path never exists in the image, so the hook
    # silently never ran.
    on_logon_script="${LIFECYCLE_HOOKS_DIR:-/run/hooks}/on-logon.sh"
    if [ -f "$on_logon_script" ]; then
        # NOTE(review): DATA_FOLDER is not defined anywhere in this repo
        # view — confirm where it is meant to be set.
        execute_remote_script "$vm_ip" "$HOST_USER" "$HOST_PASSWORD" "$on_logon_script" "$VNC_PASSWORD" "$DATA_FOLDER"
    fi
    # The VM keeps running because `lume run` was left in the background.
}
stop_vm() {
    # Gracefully stop the VM via the lume API if it exists and is running;
    # called from entry.sh's signal trap on container shutdown.
    echo "Stopping VM '$VM_NAME'..."
    STORAGE_NAME="storage_${VM_NAME}"
    # Check if the VM exists and is running (use lume get for speed)
    VM_INFO=$(lume get "$VM_NAME" --storage "$STORAGE_NAME" -f json 2>/dev/null)
    if [[ -z "$VM_INFO" || $VM_INFO == *"Virtual machine not found"* ]]; then
        echo "VM '$VM_NAME' does not exist."
    elif [[ $VM_INFO == *'"status" : "running"'* ]]; then
        lume_stop "$VM_NAME" "$STORAGE_NAME"
        echo "VM '$VM_NAME' was running and is now stopped."
    elif [[ $VM_INFO == *'"status" : "stopped"'* ]]; then
        echo "VM '$VM_NAME' is already stopped."
    else
        echo "Unknown VM status for '$VM_NAME'."
    fi
}
is_vm_running() {
    # True when $VM_NAME appears in `lume ls`. Match it as a whole word —
    # the previous unanchored substring grep let "vm1" match "vm10".
    lume ls | grep -qw -- "$VM_NAME"
}
# Stop VM with storage location specified using curl
lume_stop() {
    # Ask the host's lume API to stop a VM.
    # $1 VM name, $2 storage name.
    local vm_name="$1"
    local storage="$2"
    # Build the JSON body inside one quoted expansion — the previous
    # unquoted '$storage' concatenation was subject to word splitting.
    curl --connect-timeout 6000 \
        --max-time 5000 \
        -X POST \
        -H "Content-Type: application/json" \
        -d "{\"storage\":\"$storage\"}" \
        "http://host.docker.internal:3000/lume/vms/${vm_name}/stop"
}
# Run VM with VNC client started and shared directory using curl
lume_run() {
    # Ask the host's lume API to run a VM headless with an optional shared
    # directory. Accepts: --shared-dir=PATH, --storage NAME, --no-display,
    # and a trailing VM name.
    local shared_dir=""
    local storage="ssd"
    local vm_name="lume_vm"
    while [[ $# -gt 0 ]]; do
        case $1 in
            --shared-dir=*)
                shared_dir="${1#*=}"
                shift
                ;;
            --storage)
                storage="$2"
                shift 2
                ;;
            --no-display)
                # Accepted for compatibility; noDisplay is always sent as
                # true in the request body below.
                shift
                ;;
            *)
                # Assume last arg is VM name if not an option
                vm_name="$1"
                shift
                ;;
        esac
    done
    # Default to ~/Projects if not provided
    if [[ -z "$shared_dir" ]]; then
        shared_dir="~/Projects"
    fi
    local json_body="{\"noDisplay\": true, \"sharedDirectories\": [{\"hostPath\": \"$shared_dir\", \"readOnly\": false}], \"storage\": \"$storage\", \"recoveryMode\": false}"
    # Invoke curl directly instead of building a command string and eval-ing
    # it: eval re-parses interpolated values (quoting bugs and command
    # injection if a path ever contains shell metacharacters).
    echo "[lume_run] POST http://host.docker.internal:3000/lume/vms/$vm_name/run"
    echo "[lume_run] body: $json_body"
    curl --connect-timeout 6000 \
        --max-time 5000 \
        -X POST \
        -H "Content-Type: application/json" \
        -d "$json_body" \
        "http://host.docker.internal:3000/lume/vms/$vm_name/run"
}