Mirror of https://github.com/trycua/computer.git, synced 2026-01-08 06:20:00 -06:00
Clean up Lumier
@@ -142,10 +142,14 @@ log "normal" "Using release directory: $RELEASE_DIR"
 # Copy extracted lume to the release directory
 cp -f usr/local/bin/lume "$RELEASE_DIR/lume"
 
-# Create symbolic link in /usr/local/bin if not in minimal mode
-if [ "$LOG_LEVEL" != "minimal" ] && [ "$LOG_LEVEL" != "none" ]; then
-    log "normal" "Creating symbolic link..."
-    sudo ln -sf "$RELEASE_DIR/lume" /usr/local/bin/lume
+# Install to user-local bin directory (standard location)
+USER_BIN="$HOME/.local/bin"
+mkdir -p "$USER_BIN"
+cp -f "$RELEASE_DIR/lume" "$USER_BIN/lume"
+
+# Advise user to add to PATH if not present
+if ! echo "$PATH" | grep -q "$USER_BIN"; then
+    log "normal" "[lume build] Note: $USER_BIN is not in your PATH. Add 'export PATH=\"$USER_BIN:\$PATH\"' to your shell profile."
+fi
 
 # Get architecture and create OS identifier
@@ -8,8 +8,14 @@ codesign --force --entitlement ./resources/lume.entitlements --sign - .build/rel
 mkdir -p ./.release
 cp -f .build/release/lume ./.release/lume
 
-# Create symbolic link in /usr/local/bin
-sudo mkdir -p /usr/local/bin
-sudo ln -sf "$(pwd)/.release/lume" /usr/local/bin/lume
+# Install to user-local bin directory (standard location)
+USER_BIN="$HOME/.local/bin"
+mkdir -p "$USER_BIN"
+cp -f ./.release/lume "$USER_BIN/lume"
+
+# Advise user to add to PATH if not present
+if ! echo "$PATH" | grep -q "$USER_BIN"; then
+    echo "[lume build] Note: $USER_BIN is not in your PATH. Add 'export PATH=\"$USER_BIN:\$PATH\"' to your shell profile."
+fi
 
 popd
@@ -3,7 +3,7 @@ import Foundation
 import Virtualization
 
 /// Represents a shared directory configuration
-struct SharedDirectory {
+struct SharedDirectory: Codable {
     let hostPath: String
     let tag: String
    let readOnly: Bool
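A note on the one-line change above: all three stored properties are themselves Codable, so the conformance costs nothing to adopt; the compiler synthesizes encode(to:) and init(from:). A minimal standalone sketch (the struct is copied here so the snippet is self-contained; the field values are invented):

    import Foundation

    // Copied from the diff above for illustration.
    struct SharedDirectory: Codable {
        let hostPath: String
        let tag: String
        let readOnly: Bool
    }

    let dir = SharedDirectory(hostPath: "/Users/me/shared", tag: "shared", readOnly: false)
    let data = try! JSONEncoder().encode(dir)  // uses the synthesized encode(to:)
    print(String(data: data, encoding: .utf8)!)
    // e.g. {"hostPath":"\/Users\/me\/shared","tag":"shared","readOnly":false}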
@@ -53,18 +53,18 @@ extension VMDirectory {
     let diskExists = diskPath.exists()
     let nvramExists = nvramPath.exists()
 
-    Logger.info(
-        "VM directory initialization check",
-        metadata: [
-            "directory": dir.path,
-            "config_path": configPath.path,
-            "config_exists": "\(configExists)",
-            "disk_path": diskPath.path,
-            "disk_exists": "\(diskExists)",
-            "nvram_path": nvramPath.path,
-            "nvram_exists": "\(nvramExists)"
-        ]
-    )
+    // Logger.info(
+    //     "VM directory initialization check",
+    //     metadata: [
+    //         "directory": dir.path,
+    //         "config_path": configPath.path,
+    //         "config_exists": "\(configExists)",
+    //         "disk_path": diskPath.path,
+    //         "disk_exists": "\(diskExists)",
+    //         "nvram_path": nvramPath.path,
+    //         "nvram_exists": "\(nvramExists)"
+    //     ]
+    // )
 
     return configExists && diskExists && nvramExists
 }
@@ -139,11 +139,19 @@ extension VMDirectory {
 struct VNCSession: Codable {
     let url: String
+    let sharedDirectories: [SharedDirectory]?
+
+    init(url: String, sharedDirectories: [SharedDirectory]? = nil) {
+        self.url = url
+        self.sharedDirectories = sharedDirectories
+    }
 }
 
 extension VMDirectory {
     /// Saves VNC session information to disk
-    /// - Parameter session: The VNC session to save
+    /// - Parameters:
+    ///   - session: The VNC session to save
+    ///   - sharedDirectories: Optional array of shared directories to save with the session
     /// - Throws: VMDirectoryError if the save operation fails
     func saveSession(_ session: VNCSession) throws {
         let encoder = JSONEncoder()
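Worth noting about the shape of VNCSession: because sharedDirectories is optional and the new init defaults it to nil, session files written before this commit, which contain only the url key, still decode cleanly; synthesized Codable uses decodeIfPresent for optional properties. A standalone sketch of that backward-compatible decode (types copied from the diff, URL value invented):

    import Foundation

    struct SharedDirectory: Codable {
        let hostPath: String
        let tag: String
        let readOnly: Bool
    }

    struct VNCSession: Codable {
        let url: String
        let sharedDirectories: [SharedDirectory]?
    }

    // A pre-existing session file with no shared-directory data:
    let legacyJSON = Data(#"{"url":"vnc://127.0.0.1:5900"}"#.utf8)
    let session = try! JSONDecoder().decode(VNCSession.self, from: legacyJSON)
    print(session.url)                      // vnc://127.0.0.1:5900
    print(session.sharedDirectories ?? [])  // [] -- the missing key decodes to nil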
@@ -78,6 +78,23 @@ class VM {
     var details: VMDetails {
         let isRunning: Bool = self.isRunning
         let vncUrl = isRunning ? getVNCUrl() : nil
+
+        // Try to load shared directories from the session file
+        var sharedDirs: [SharedDirectory]? = nil
+
+        // Check if sessions file exists and load shared directories
+        let sessionsPath = vmDirContext.dir.sessionsPath.path
+        let fileExists = FileManager.default.fileExists(atPath: sessionsPath)
+
+        do {
+            if fileExists {
+                let session = try vmDirContext.dir.loadSession()
+                sharedDirs = session.sharedDirectories
+            }
+        } catch {
+            // It's okay if we don't have a saved session
+            Logger.error("Failed to load session data", metadata: ["name": vmDirContext.name, "error": "\(error)"])
+        }
 
         return VMDetails(
             name: vmDirContext.name,
@@ -90,7 +107,8 @@ class VM {
             vncUrl: vncUrl,
             ipAddress: isRunning
                 ? DHCPLeaseParser.getIPAddress(forMAC: vmDirContext.config.macAddress!) : nil,
-            locationName: vmDirContext.storage ?? "default"
+            locationName: vmDirContext.storage ?? "default",
+            sharedDirectories: sharedDirs
         )
     }
@@ -117,15 +135,17 @@ class VM {
             throw VMError.alreadyRunning(vmDirContext.name)
         }
 
+        // Keep track of shared directories for logging
         Logger.info(
             "Running VM with configuration",
             metadata: [
                 "cpuCount": "\(cpuCount)",
                 "memorySize": "\(memorySize)",
                 "diskSize": "\(vmDirContext.config.diskSize ?? 0)",
+                "sharedDirectories": sharedDirectories.map(
+                    { $0.string }
+                ).joined(separator: ", "),
                 "macAddress": vmDirContext.config.macAddress ?? "none",
                 "sharedDirectoryCount": "\(sharedDirectories.count)",
                 "mount": mount?.path ?? "none",
                 "vncPort": "\(vncPort)",
                 "recoveryMode": "\(recoveryMode)",
                 "usbMassStorageDeviceCount": "\(usbMassStoragePaths?.count ?? 0)",
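The metadata entry above maps each directory through a string property on SharedDirectory that is not part of this diff. As a purely hypothetical stand-in for illustration (the real property may format differently):

    struct SharedDirectory {
        let hostPath: String
        let tag: String
        let readOnly: Bool

        // Hypothetical: a compact one-line description of the kind the real
        // `string` property presumably provides; format invented here.
        var string: String {
            "\(hostPath):\(tag):\(readOnly ? "ro" : "rw")"
        }
    }

    let dirs = [SharedDirectory(hostPath: "/Users/me/shared", tag: "shared", readOnly: true)]
    print(dirs.map({ $0.string }).joined(separator: ", "))  // /Users/me/shared:shared:ro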
@@ -160,7 +180,7 @@ class VM {
         )
         virtualizationService = try virtualizationServiceFactory(config)
 
-        let vncInfo = try await setupVNC(noDisplay: noDisplay, port: vncPort)
+        let vncInfo = try await setupSession(noDisplay: noDisplay, port: vncPort, sharedDirectories: sharedDirectories)
         Logger.info("VNC info", metadata: ["vncInfo": vncInfo])
 
         // Start the VM
@@ -391,7 +411,8 @@ class VM {
         return vncService.url
     }
 
-    private func setupVNC(noDisplay: Bool, port: Int = 0) async throws -> String {
+    /// Sets up the VNC service and returns the VNC URL
+    private func startVNCService(port: Int = 0) async throws -> String {
         guard let service = virtualizationService else {
             throw VMError.internalError("Virtualization service not initialized")
         }
@@ -401,12 +422,40 @@ class VM {
         guard let url = vncService.url else {
             throw VMError.vncNotConfigured
         }
 
         return url
     }
 
+    /// Saves the session information including shared directories to disk
+    private func saveSessionData(url: String, sharedDirectories: [SharedDirectory]) {
+        do {
+            let session = VNCSession(url: url, sharedDirectories: sharedDirectories.isEmpty ? nil : sharedDirectories)
+            try vmDirContext.dir.saveSession(session)
+            Logger.info("Saved VNC session with shared directories",
+                metadata: [
+                    "count": "\(sharedDirectories.count)",
+                    "dirs": "\(sharedDirectories.map { $0.hostPath }.joined(separator: ", "))",
+                    "sessionsPath": "\(vmDirContext.dir.sessionsPath.path)"
+                ])
+        } catch {
+            Logger.error("Failed to save VNC session", metadata: ["error": "\(error)"])
+        }
+    }
+
+    /// Main session setup method that handles VNC and persists session data
+    private func setupSession(noDisplay: Bool, port: Int = 0, sharedDirectories: [SharedDirectory] = []) async throws -> String {
+        // Start the VNC service and get the URL
+        let url = try await startVNCService(port: port)
+
+        // Save the session data
+        saveSessionData(url: url, sharedDirectories: sharedDirectories)
+
+        // Open the VNC client if needed
+        if !noDisplay {
+            Logger.info("Starting VNC session")
+            try await vncService.openClient(url: url)
+        }
+
+        return url
+    }
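The split above keeps responsibilities narrow: startVNCService only brings the service up, saveSessionData persists without throwing (a failed write just logs), and setupSession orchestrates both plus the optional client launch. One detail that matters for readers of the persisted session file: saveSessionData stores an empty directory list as nil, and synthesized Codable omits nil optionals, so the key disappears entirely. A standalone sketch of that behavior (types copied from the diff, values invented):

    import Foundation

    struct SharedDirectory: Codable {
        let hostPath: String
        let tag: String
        let readOnly: Bool
    }

    struct VNCSession: Codable {
        let url: String
        let sharedDirectories: [SharedDirectory]?
    }

    // Mirrors the guard in saveSessionData: an empty list becomes nil,
    // and the synthesized encoder drops the key from the output.
    let dirs: [SharedDirectory] = []
    let session = VNCSession(url: "vnc://127.0.0.1:5900",
                             sharedDirectories: dirs.isEmpty ? nil : dirs)
    let data = try! JSONEncoder().encode(session)
    print(String(data: data, encoding: .utf8)!)
    // e.g. {"url":"vnc:\/\/127.0.0.1:5900"} with no sharedDirectories key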
@@ -550,7 +599,7 @@ class VM {
         )
         virtualizationService = try virtualizationServiceFactory(config)
 
-        let vncInfo = try await setupVNC(noDisplay: noDisplay, port: vncPort)
+        let vncInfo = try await setupSession(noDisplay: noDisplay, port: vncPort, sharedDirectories: sharedDirectories)
         Logger.info("VNC info", metadata: ["vncInfo": vncInfo])
 
         // Start the VM
@@ -40,6 +40,7 @@ struct VMDetails: Codable {
     let vncUrl: String?
     let ipAddress: String?
     let locationName: String
+    let sharedDirectories: [SharedDirectory]?
 
     init(
         name: String,
@@ -51,7 +52,8 @@ struct VMDetails: Codable {
         status: String,
         vncUrl: String?,
         ipAddress: String?,
-        locationName: String
+        locationName: String,
+        sharedDirectories: [SharedDirectory]? = nil
     ) {
         self.name = name
         self.os = os
@@ -63,5 +65,6 @@ struct VMDetails: Codable {
         self.vncUrl = vncUrl
         self.ipAddress = ipAddress
         self.locationName = locationName
+        self.sharedDirectories = sharedDirectories
     }
 }
@@ -31,6 +31,16 @@ enum VMDetailsPrinter {
                 $0.status
             }),
             Column(header: "storage", width: 16, getValue: { $0.locationName }),
+            Column(
+                header: "shared_dirs", width: 54,
+                getValue: { vm in
+                    // Only show shared directories if the VM is running
+                    if vm.status == "running", let dirs = vm.sharedDirectories, !dirs.isEmpty {
+                        return dirs.map { "\($0.hostPath) (\($0.readOnly ? "ro" : "rw"))" }.joined(separator: ", ")
+                    } else {
+                        return "-"
+                    }
+                }),
             Column(
                 header: "ip", width: 16,
                 getValue: {
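The shared_dirs cell renders each entry as "path (ro|rw)" and collapses the list with commas, falling back to "-" when the VM is stopped or has nothing mounted. A quick standalone check of just the formatting expression (struct copied from the diff, paths invented):

    struct SharedDirectory {
        let hostPath: String
        let tag: String
        let readOnly: Bool
    }

    let dirs = [
        SharedDirectory(hostPath: "/Users/me/project", tag: "project", readOnly: false),
        SharedDirectory(hostPath: "/Users/me/assets", tag: "assets", readOnly: true),
    ]
    // Same map/join used by the column's getValue closure above.
    let cell = dirs.map { "\($0.hostPath) (\($0.readOnly ? "ro" : "rw"))" }.joined(separator: ", ")
    print(cell)  // /Users/me/project (rw), /Users/me/assets (ro)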
@@ -56,7 +66,9 @@ enum VMDetailsPrinter {
             print(jsonString)
         } else {
             printHeader(print: print)
-            vms.forEach({ printVM($0, print: print) })
+            vms.forEach({ vm in
+                printVM(vm, print: print)
+            })
         }
     }
@@ -50,7 +50,7 @@ cleanup() {
     set +e  # Don't exit on error in cleanup
     echo "[cleanup] Caught signal, shutting down..."
     echo "[cleanup] Stopping VM..."
-    stop_vm
+    stop_vm true
     # Now gently stop noVNC proxy if running
     # if [ -n "${NOVNC_PID:-}" ] && kill -0 "$NOVNC_PID" 2>/dev/null; then
    #     echo "[cleanup] Stopping noVNC proxy (PID $NOVNC_PID)..."
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-
-# Source constants if running in container context
-if [ -f "/run/config/constants.sh" ]; then
-    source "/run/config/constants.sh"
-fi
-
-# Define server address with fallback
-SERVER="${TUNNEL_HOST:-host.docker.internal}:${TUNNEL_PORT:-8080}"
-
-# Extract the base name of the command and arguments
-command=$(basename "$0")
-subcommand="$1"
-shift
-args="$@"
-
-command="$command $subcommand $args"
-
-# Concatenate command and any stdin data
-full_data="$command"
-if [ ! -t 0 ]; then
-    stdin_data=$(cat)
-    if [ -n "$stdin_data" ]; then
-        # Format full_data to include stdin data
-        full_data="$full_data << 'EOF'
-$stdin_data
-EOF"
-    fi
-fi
-
-# Trim leading/trailing whitespace and newlines
-full_data=$(echo -e "$full_data" | sed 's/^[ \t\n]*//;s/[ \t\n]*$//')
-
-# Log command if debug is enabled
-if [ "${LUMIER_DEBUG:-0}" -eq 1 ]; then
-    echo "Executing lume command: $full_data" >&2
-    echo "Sending to: $SERVER" >&2
-fi
-
-# Use curl with -N to disable output buffering and -s for silent mode
-curl -N -s -X POST \
-    -H "Content-Type: application/octet-stream" \
-    --data-binary @- \
-    "http://$SERVER" <<< "$full_data"
@@ -1,9 +1,37 @@
-lifecycle_folder="$SHARED_FOLDER_PATH/lifecycle"
-on_logon_script="$lifecycle_folder/on-logon.sh"
-
-if [ -f "$on_logon_script" ]; then
-    chmod +x "$on_logon_script"
-    source "$on_logon_script"
-else
-    echo "No on-logon script found in $lifecycle_folder"
-fi
+#!/bin/bash
+
+# Arguments passed from execute_remote_script in vm.sh
+# $1: VNC_PASSWORD
+# $2: HOST_SHARED_PATH (Path inside VM where host shared dir is mounted, e.g., /Volumes/My Shared Files)
+
+VNC_PASSWORD="$1"
+HOST_SHARED_PATH="$2"
+
+# Define the path to the user's optional on-logon script within the shared folder
+USER_ON_LOGON_SCRIPT_PATH="$HOST_SHARED_PATH/lifecycle/on-logon.sh"
+
+echo "[Remote] Lumier entry point script starting..."
+echo "[Remote] Checking for user script at: $USER_ON_LOGON_SCRIPT_PATH"
+
+# Check if the user-provided script exists
+if [ -f "$USER_ON_LOGON_SCRIPT_PATH" ]; then
+    echo "[Remote] Found user script. Making executable and running..."
+    chmod +x "$USER_ON_LOGON_SCRIPT_PATH"
+
+    # Execute the user script in a subshell, passing VNC password and shared path as arguments
+    "$USER_ON_LOGON_SCRIPT_PATH" "$VNC_PASSWORD" "$HOST_SHARED_PATH"
+
+    # Capture exit code (optional, but good practice)
+    USER_SCRIPT_EXIT_CODE=$?
+    echo "[Remote] User script finished with exit code: $USER_SCRIPT_EXIT_CODE."
+
+    # Propagate the exit code if non-zero (optional)
+    # if [ $USER_SCRIPT_EXIT_CODE -ne 0 ]; then
+    #     exit $USER_SCRIPT_EXIT_CODE
+    # fi
+else
+    echo "[Remote] No user-provided on-logon script found at $USER_ON_LOGON_SCRIPT_PATH. Skipping."
+fi
+
+echo "[Remote] Lumier entry point script finished."
+exit 0  # Ensure the entry point script exits cleanly if no user script or user script succeeded
@@ -83,7 +83,7 @@ execute_remote_script() {
     echo "[DEBUG] Script path: $script_path"
 
     # Use a here-document to send the script content
-    sshpass -p "$password" ssh -o StrictHostKeyChecking=no "$user@$host" "bash -s" <<EOF
+    sshpass -p "$password" ssh -o StrictHostKeyChecking=no "$user@$host" "bash -s -- '$vnc_password' '$data_folder'" <<EOF
 $script_content
 EOF
@@ -54,6 +54,8 @@ start_vm() {
     lume_run $SHARED_DIR_ARGS --storage "$STORAGE_PATH" "$VM_NAME" &
+    # lume run "$VM_NAME" --storage "$STORAGE_PATH" --no-display
 
+    # sleep 10000000
 
     # Wait for VM to be running and VNC URL to be available
     vm_ip=""
     vnc_url=""
@@ -64,18 +66,16 @@ start_vm() {
         # Get VM info as JSON using the API function
         VM_INFO=$(lume_get "$VM_NAME" "$STORAGE_PATH")
+        # VM_INFO=$(lume get "$VM_NAME" --storage "$STORAGE_PATH" -f json 2>/dev/null)
         echo "VM_INFO: $VM_INFO"
 
-        # Check if VM has status 'running'
-        if [[ $VM_INFO == *'"status" : "running"'* ]]; then
-            # Extract IP address using the existing function from utils.sh
-            vm_ip=$(extract_json_field "ipAddress" "$VM_INFO")
-            # Extract VNC URL using the existing function from utils.sh
-            vnc_url=$(extract_json_field "vncUrl" "$VM_INFO")
-
-            # If we have both IP and VNC URL, break the loop
-            if [ -n "$vm_ip" ] && [ -n "$vnc_url" ]; then
-                break
-            fi
-        fi
+        # Extract status, IP address, and VNC URL using the helper function
+        vm_status=$(extract_json_field "status" "$VM_INFO")
+        vm_ip=$(extract_json_field "ipAddress" "$VM_INFO")
+        vnc_url=$(extract_json_field "vncUrl" "$VM_INFO")
+
+        # Check if VM status is 'running' and we have IP and VNC URL
+        if [ "$vm_status" = "running" ] && [ -n "$vm_ip" ] && [ -n "$vnc_url" ]; then
+            break
+        fi
 
         sleep 2
@@ -134,7 +134,7 @@ lume_get() {
     fi
 
     # Always log the curl command before sending
-    echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] INFO: Executing curl request: $api_url"
+    echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] INFO: Executing curl request: $api_url" >&2
 
     # Make the API call
     local response=$(curl --connect-timeout 6000 \
@@ -189,20 +189,29 @@ lume_set() {
 }
 
 stop_vm() {
+    local in_cleanup=${1:-false}  # Optional first argument to indicate if called from cleanup trap
     echo "Stopping VM '$VM_NAME'..."
     STORAGE_PATH="$HOST_STORAGE_PATH"
-    # Check if the VM exists and is running
+    echo "STORAGE_PATH: $STORAGE_PATH"
 
     VM_INFO=$(lume_get "$VM_NAME" "$STORAGE_PATH")
-    if [[ -z "$VM_INFO" || $VM_INFO == *"Virtual machine not found"* ]]; then
-        echo "VM '$VM_NAME' does not exist."
-    elif [[ $VM_INFO == *'"status" : "running"'* ]]; then
+    vm_status=$(extract_json_field "status" "$VM_INFO")
+
+    if [ "$vm_status" == "running" ]; then
+        echo "VM '$VM_NAME' status is 'running'. Attempting stop."
         lume_stop "$VM_NAME" "$STORAGE_PATH"
-        echo "VM '$VM_NAME' was running and is now stopped."
-    elif [[ $VM_INFO == *'"status" : "stopped"'* ]]; then
         echo "VM '$VM_NAME' stop command issued."
+    elif [ "$vm_status" == "stopped" ]; then
+        echo "VM '$VM_NAME' is already stopped."
+    elif [ "$in_cleanup" = true ]; then
+        # If we are in the cleanup trap and status is unknown or VM not found,
+        # still attempt a stop just in case.
+        echo "VM status is unknown ('$vm_status') or VM not found during cleanup. Attempting stop anyway."
+        lume_stop "$VM_NAME" "$STORAGE_PATH"
+        sleep 5000
+        echo "VM '$VM_NAME' stop command issued as a precaution."
     else
-        echo "Unknown VM status for '$VM_NAME'."
+        echo "VM status is unknown ('$vm_status') or VM not found. Not attempting stop."
     fi
 }