Change lume serve default port, stabilize lumier vm provider

This commit is contained in:
f-trycua
2025-05-12 16:34:36 -07:00
parent d0a0a344a0
commit e47b0f61f0
21 changed files with 838 additions and 162 deletions

View File

@@ -49,7 +49,7 @@ RUN rm -rf /app/* /app/.??*
# Note: This Docker image doesn't contain the lume executable (macOS-specific)
# Instead, it relies on connecting to a lume server running on the host machine
# via host.docker.internal:7777
# Default command
CMD ["bash"]

View File

@@ -122,7 +122,7 @@ As an alternative to installing directly on your host machine, you can use Docke
### Prerequisites
- Docker installed on your machine
- Lume server running on your host (port 7777): `lume serve`
### Setup and Usage
@@ -156,10 +156,10 @@ The Docker development environment:
- Installs all required Python dependencies in the container
- Mounts your source code from the host at runtime
- Automatically configures the connection to use host.docker.internal:7777 for accessing the Lume server on your host machine
- Preserves your code changes without requiring rebuilds (source code is mounted as a volume)
> **Note**: The Docker container doesn't include the macOS-specific Lume executable. Instead, it connects to the Lume server running on your host machine via host.docker.internal:7777. Make sure to start the Lume server on your host before running examples in the container.
## Cleanup and Reset

View File

@@ -105,13 +105,13 @@ This is typically due to known instability issues with the `lume serve` backgrou
### How do I troubleshoot Computer not connecting to lume daemon?
If you're experiencing connection issues between Computer and the lume daemon, it could be because the port 7777 (used by lume) is already in use by an orphaned process. You can diagnose this issue with:
```bash
sudo lsof -i :7777
```
This command will show all processes using port 7777. If you see a lume process already running, you can terminate it with:
```bash
kill <PID>

View File

@@ -34,17 +34,18 @@ async def main():
cpu="4",
os_type="macos",
verbosity=LogLevel.NORMAL, # Use QUIET to suppress most logs
provider_type=VMProviderType.LUME,
provider_type=VMProviderType.LUMIER,
storage="/Users/francescobonacci/repos/trycua/computer/examples/storage",
# shared_directories=[
# "/Users/francescobonacci/repos/trycua/computer/examples/shared"
# ]
shared_directories=[
"/Users/francescobonacci/repos/trycua/computer/examples/shared"
],
ephemeral=True
)
try:
# Run the computer with default parameters
await computer.run()
await computer.interface.hotkey("command", "space")
# res = await computer.interface.run_command("touch ./Downloads/empty_file")

View File

@@ -4,7 +4,7 @@ from pylume import PyLume, ImageRef, VMRunOpts, SharedDirectory, VMConfig, VMUpd
async def main():
"""Example usage of PyLume."""
async with PyLume(port=3000, use_existing_server=False, debug=True) as pylume:
async with PyLume(port=7777, use_existing_server=False, debug=True) as pylume:
# Get latest IPSW URL
print("\n=== Getting Latest IPSW URL ===")

View File

@@ -34,10 +34,11 @@ class Computer:
verbosity: Union[int, LogLevel] = logging.INFO,
telemetry_enabled: bool = True,
provider_type: Union[str, VMProviderType] = VMProviderType.LUME,
port: Optional[int] = 3000,
port: Optional[int] = 7777,
noVNC_port: Optional[int] = 8006,
host: str = os.environ.get("PYLUME_HOST", "localhost"),
storage: Optional[str] = None # Path for persistent VM storage (Lumier provider)
storage: Optional[str] = None,
ephemeral: bool = False
):
"""Initialize a new Computer instance.
@@ -62,6 +63,7 @@ class Computer:
noVNC_port: Optional port for the noVNC web interface (Lumier provider)
host: Host to use for VM provider connections (e.g. "localhost", "host.docker.internal")
storage: Optional path for persistent VM storage (Lumier provider)
ephemeral: Whether to use ephemeral storage
"""
self.logger = Logger("cua.computer", verbosity)
@@ -74,8 +76,13 @@ class Computer:
self.host = host
self.os_type = os_type
self.provider_type = provider_type
self.storage = storage
self.ephemeral = ephemeral
if ephemeral:
self.storage = "ephemeral"
else:
self.storage = storage
# For Lumier provider, store the first shared directory path to use
# for VM file sharing
self.shared_path = None
@@ -150,7 +157,6 @@ class Computer:
async def __aenter__(self):
"""Enter async context manager."""
await self.run()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
@@ -211,10 +217,19 @@ class Computer:
# Configure provider based on initialization parameters
provider_kwargs = {
"storage": self.storage,
"verbose": self.verbosity >= LogLevel.DEBUG,
"ephemeral": self.ephemeral, # Pass ephemeral flag to provider
}
# Handle storage path separately from ephemeral flag
if self.ephemeral:
self.logger.info("Using ephemeral storage and ephemeral VMs")
# Use ephemeral storage location
provider_kwargs["storage"] = "ephemeral"
else:
# Use explicitly configured storage
provider_kwargs["storage"] = self.storage
# VM name is already set in self.config.name and will be used when calling provider methods
# For Lumier provider, add specific configuration
@@ -281,33 +296,54 @@ class Computer:
for path in self.shared_directories:
self.logger.verbose(f"Adding shared directory: {path}")
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
if os.path.exists(path):
# Add path in format expected by Lume API
shared_dirs.append({
"hostPath": path,
"readOnly": False
})
else:
self.logger.warning(f"Shared directory does not exist: {path}")
# Define VM run options
run_opts = {
"noDisplay": False,
"sharedDirectories": shared_dirs,
"display": self.config.display,
"memory": self.config.memory,
"cpu": self.config.cpu
}
# For Lumier provider, pass the noVNC_port if specified
if self.provider_type == VMProviderType.LUMIER and self.noVNC_port is not None:
run_opts["noVNC_port"] = self.noVNC_port
self.logger.info(f"Using noVNC_port {self.noVNC_port} for Lumier provider")
self.logger.info(f"VM run options: {run_opts}")
# Prepare run options to pass to the provider
run_opts = {}
# Add display information if available
if self.config.display is not None:
display_info = {
"width": self.config.display.width,
"height": self.config.display.height,
}
# Check if scale_factor exists before adding it
if hasattr(self.config.display, "scale_factor"):
display_info["scale_factor"] = self.config.display.scale_factor
run_opts["display"] = display_info
# Add shared directories if available
if self.shared_directories:
run_opts["shared_directories"] = shared_dirs.copy()
# Run the VM with the provider
try:
if self.config.vm_provider is None:
raise RuntimeError(f"VM provider not initialized for {self.config.name}")
# Use the complete run_opts we prepared earlier
# Handle ephemeral storage for run_vm method too
storage_param = "ephemeral" if self.ephemeral else self.storage
# Log the image being used
self.logger.info(f"Running VM using image: {self.image}")
# Call provider.run_vm with explicit image parameter
response = await self.config.vm_provider.run_vm(
name=self.config.name,
run_opts=run_opts,
storage=self.storage # Pass storage explicitly for clarity
)
image=self.image,
name=self.config.name,
run_opts=run_opts,
storage=storage_param
)
self.logger.info(f"VM run response: {response if response else 'None'}")
except Exception as run_error:
self.logger.error(f"Failed to run VM: {run_error}")
@@ -316,9 +352,13 @@ class Computer:
# Wait for VM to be ready with a valid IP address
self.logger.info("Waiting for VM to be ready with a valid IP address...")
try:
# Use the enhanced get_ip method that includes retry logic
max_retries = 30 # Increased for initial VM startup
retry_delay = 2 # 2 seconds between retries
# Increased values for Lumier provider which needs more time for initial setup
if self.provider_type == VMProviderType.LUMIER:
max_retries = 60 # Increased for Lumier VM startup which takes longer
retry_delay = 3 # 3 seconds between retries for Lumier
else:
max_retries = 30 # Default for other providers
retry_delay = 2 # 2 seconds between retries
self.logger.info(f"Waiting up to {max_retries * retry_delay} seconds for VM to be ready...")
ip = await self.get_ip(max_retries=max_retries, retry_delay=retry_delay)
@@ -419,65 +459,38 @@ class Computer:
async def get_ip(self, max_retries: int = 15, retry_delay: int = 2) -> str:
"""Get the IP address of the VM or localhost if using host computer server.
This method delegates to the provider's get_ip method, which waits indefinitely
until the VM has a valid IP address.
Args:
max_retries: Maximum number of retries to get the IP (default: 15)
max_retries: Unused parameter, kept for backward compatibility
retry_delay: Delay between retries in seconds (default: 2)
Returns:
IP address of the VM or localhost if using host computer server
Raises:
TimeoutError: If unable to get a valid IP address after retries
"""
# For host computer server, always return localhost immediately
if self.use_host_computer_server:
return "127.0.0.1"
# Try multiple times to get a valid IP
for attempt in range(1, max_retries + 1):
if attempt > 1:
self.logger.info(f"Retrying to get VM IP address (attempt {attempt}/{max_retries})...")
try:
# Get VM information from the provider
if self.config.vm_provider is None:
raise RuntimeError("VM provider is not initialized")
# Get VM info from provider with explicit storage parameter
vm_info = await self.config.vm_provider.get_vm(
name=self.config.name,
storage=self.storage # Pass storage explicitly for clarity
)
# Check if we got a valid IP
ip = vm_info.get("ip_address", None)
if ip and ip != "unknown" and not ip.startswith("0.0.0.0"):
self.logger.info(f"Got valid VM IP address: {ip}")
return ip
# Check the VM status
status = vm_info.get("status", "unknown")
# If the VM is in a non-running state (stopped, paused, etc.)
# raise a more informative error instead of waiting
if status in ["stopped"]:
raise RuntimeError(f"VM is not running yet (status: {status})")
# If VM is starting or initializing, wait and retry
if status != "running":
self.logger.info(f"VM is not running yet (status: {status}). Waiting...")
await asyncio.sleep(retry_delay)
continue
# If VM is running but no IP yet, wait and retry
self.logger.info("VM is running but no valid IP address yet. Waiting...")
except Exception as e:
self.logger.warning(f"Error getting VM IP: {e}")
await asyncio.sleep(retry_delay)
# If we get here, we couldn't get a valid IP after all retries
raise TimeoutError(f"Failed to get valid IP address for VM {self.config.name} after {max_retries} attempts")
# Get IP from the provider - each provider implements its own waiting logic
if self.config.vm_provider is None:
raise RuntimeError("VM provider is not initialized")
# Log that we're waiting for the IP
self.logger.info(f"Waiting for VM {self.config.name} to get an IP address...")
# Call the provider's get_ip method which will wait indefinitely
storage_param = "ephemeral" if self.ephemeral else self.storage
ip = await self.config.vm_provider.get_ip(
name=self.config.name,
storage=storage_param,
retry_delay=retry_delay
)
# Log success
self.logger.info(f"VM {self.config.name} has IP address: {ip}")
return ip
async def wait_vm_ready(self) -> Optional[Dict[str, Any]]:

View File

@@ -46,10 +46,11 @@ class BaseVMProvider(AsyncContextManager):
pass
@abc.abstractmethod
async def run_vm(self, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
async def run_vm(self, image: str, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
"""Run a VM by name with the given options.
Args:
image: Name/tag of the image to use
name: Name of the VM to run
run_opts: Dictionary of run options (memory, cpu, etc.)
storage: Optional storage path override. If provided, this will be used
@@ -88,3 +89,18 @@ class BaseVMProvider(AsyncContextManager):
Dictionary with VM update status and information
"""
pass
@abc.abstractmethod
async def get_ip(self, name: str, storage: Optional[str] = None, retry_delay: int = 2) -> str:
"""Get the IP address of a VM, waiting indefinitely until it's available.
Args:
name: Name of the VM to get the IP for
storage: Optional storage path override. If provided, this will be used
instead of the provider's default storage path.
retry_delay: Delay between retries in seconds (default: 2)
Returns:
IP address of the VM when it becomes available
"""
pass

View File

@@ -14,7 +14,7 @@ class VMProviderFactory:
@staticmethod
def create_provider(
provider_type: Union[str, VMProviderType],
port: Optional[int] = None,
port: int = 7777,
host: str = "localhost",
bin_path: Optional[str] = None,
storage: Optional[str] = None,
@@ -63,9 +63,9 @@ class VMProviderFactory:
return LumeProvider(
port=port,
host=host,
bin_path=bin_path,
storage=storage,
verbose=verbose
verbose=verbose,
ephemeral=ephemeral
)
except ImportError as e:
logger.error(f"Failed to import LumeProvider: {e}")

View File

@@ -8,8 +8,9 @@ import os
import re
import asyncio
import json
import subprocess
import logging
import subprocess
import urllib.parse
from typing import Dict, Any, Optional, List, Tuple
from ..base import BaseVMProvider, VMProviderType
@@ -19,6 +20,7 @@ from ..lume_api import (
lume_api_run,
lume_api_stop,
lume_api_update,
lume_api_pull,
HAS_CURL,
parse_memory
)
@@ -26,7 +28,6 @@ from ..lume_api import (
# Setup logging
logger = logging.getLogger(__name__)
class LumeProvider(BaseVMProvider):
"""Lume VM provider implementation using direct curl commands.
@@ -36,18 +37,17 @@ class LumeProvider(BaseVMProvider):
def __init__(
self,
port: Optional[int] = None,
port: int = 7777,
host: str = "localhost",
bin_path: Optional[str] = None,
storage: Optional[str] = None,
verbose: bool = False,
ephemeral: bool = False,
):
"""Initialize the Lume provider.
Args:
port: Port for the Lume API server (default: 3000)
port: Port for the Lume API server (default: 7777)
host: Host to use for API connections (default: localhost)
bin_path: Optional path to the Lume binary (not used directly)
storage: Path to store VM data
verbose: Enable verbose logging
"""
@@ -58,14 +58,16 @@ class LumeProvider(BaseVMProvider):
)
self.host = host
self.port = port or 3000 # Default port for Lume API
self.port = port # Default port for Lume API
self.storage = storage
self.bin_path = bin_path
self.verbose = verbose
self.ephemeral = ephemeral # If True, VMs will be deleted after stopping
# Base API URL for Lume API calls
self.api_base_url = f"http://{self.host}:{self.port}"
self.logger = logging.getLogger(__name__)
@property
def provider_type(self) -> VMProviderType:
"""Get the provider type."""
@@ -284,14 +286,256 @@ class LumeProvider(BaseVMProvider):
else:
return []
async def run_vm(self, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
"""Run a VM with the given options."""
return self._lume_api_run(name, run_opts, debug=self.verbose)
async def run_vm(self, image: str, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
    """Run a VM, pulling its image from the Lume registry first if needed.

    If the VM does not exist in the storage location, this attempts to pull
    it from the Lume registry before starting it.

    Args:
        image: Image name to use when pulling the VM if it doesn't exist
        name: Name of the VM to run
        run_opts: Dictionary of run options (memory, cpu, etc.)
        storage: Optional storage path override. If provided, this will be
            used instead of the provider's default storage path.

    Returns:
        Dictionary with VM run status and information (or the pull error
        dictionary if the image could not be fetched)
    """
    # A lookup error means the VM is not present locally yet — pull it.
    existing = await self.get_vm(name, storage=storage)
    if "error" in existing:
        self.logger.info(f"VM {name} not found, attempting to pull image {image} from registry...")
        pulled = await self.pull_vm(name=name, image=image, storage=storage)
        if "error" in pulled:
            # Propagate the pull failure to the caller unchanged.
            self.logger.error(f"Failed to pull VM image: {pulled['error']}")
            return pulled
        self.logger.info(f"Successfully pulled VM image {image} as {name}")

    self.logger.info(f"Running VM {name} with options: {run_opts}")

    from ..lume_api import lume_api_run

    # Per-call storage override takes precedence over the provider default.
    effective_storage = self.storage if storage is None else storage
    return lume_api_run(
        vm_name=name,
        host=self.host,
        port=self.port,
        run_opts=run_opts,
        storage=effective_storage,
        debug=self.verbose,
        verbose=self.verbose,
    )
async def stop_vm(self, name: str, storage: Optional[str] = None) -> Dict[str, Any]:
    """Stop a running VM.

    If this provider was initialized with ephemeral=True, the VM will also
    be deleted after it is stopped.

    Args:
        name: Name of the VM to stop
        storage: Optional storage path override (forwarded to the delete
            call in ephemeral mode)

    Returns:
        Dictionary with stop status and information. When the ephemeral
        delete runs, the result additionally carries a "deleted" flag and
        either "delete_result" or "delete_error".
    """
    # Stop the VM first
    stop_result = self._lume_api_stop(name, debug=self.verbose)

    # Log ephemeral status for debugging
    self.logger.info(f"Ephemeral mode status: {self.ephemeral}")

    # Only delete when the stop either reported success or at least did
    # not report an error — deleting a VM that failed to stop is unsafe.
    if self.ephemeral and (stop_result.get("success", False) or "error" not in stop_result):
        self.logger.info(f"Ephemeral mode enabled - deleting VM {name} after stopping")
        try:
            delete_result = await self.delete_vm(name, storage=storage)
            # Return combined result: stop info plus delete outcome.
            return {
                **stop_result,
                "deleted": True,
                "delete_result": delete_result,
            }
        except Exception as e:
            self.logger.error(f"Failed to delete ephemeral VM {name}: {e}")
            # Surface the delete failure but still report the stop outcome.
            return {
                **stop_result,
                "deleted": False,
                "delete_error": str(e),
            }

    # Just return the stop result if not ephemeral
    return stop_result
async def pull_vm(
    self,
    name: str,
    image: str,
    storage: Optional[str] = None,
    registry: str = "ghcr.io",
    organization: str = "trycua",
    pull_opts: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Pull a VM image from the registry.

    Args:
        name: Name for the VM after pulling
        image: The image name to pull (e.g. 'macos-sequoia-cua:latest')
        storage: Optional storage path to use
        registry: Registry to pull from (default: ghcr.io)
        organization: Organization in registry (default: trycua)
        pull_opts: Additional options for pulling the VM (optional)

    Returns:
        Dictionary with information about the pulled VM, or a dictionary
        with an "error" key on failure.

    Raises:
        ValueError: If no image name is provided
    """
    if not image:
        # An empty image name cannot be resolved against the registry.
        raise ValueError("Image parameter is required for pull_vm")

    self.logger.info(f"Pulling VM image '{image}' as '{name}'")
    self.logger.info("You can check the pull progress using: lume logs -f")

    # Normalize missing pull options to an empty dict.
    pull_opts = {} if pull_opts is None else pull_opts

    self.logger.debug(f"Pull storage location: {storage or 'default'}")

    try:
        from ..lume_api import lume_api_pull

        outcome = lume_api_pull(
            image=image,
            name=name,
            host=self.host,
            port=self.port,
            storage=self.storage if storage is None else storage,
            registry=registry,
            organization=organization,
            debug=self.verbose,
            verbose=self.verbose,
        )
    except Exception as e:
        self.logger.error(f"Failed to pull VM image '{image}': {e}")
        return {"error": f"Failed to pull VM: {str(e)}"}

    if "error" in outcome:
        self.logger.error(f"Failed to pull VM image: {outcome['error']}")
        return outcome

    self.logger.info(f"Successfully pulled VM image '{image}' as '{name}'")
    return outcome
async def delete_vm(self, name: str, storage: Optional[str] = None) -> Dict[str, Any]:
    """Permanently delete a VM via the Lume API.

    Args:
        name: Name of the VM to delete
        storage: Optional storage path override

    Returns:
        Dictionary with delete status and information, or a dictionary
        with an "error" key on failure.
    """
    self.logger.info(f"Deleting VM {name}...")
    try:
        from ..lume_api import lume_api_delete

        outcome = lume_api_delete(
            vm_name=name,
            host=self.host,
            port=self.port,
            storage=self.storage if storage is None else storage,
            debug=self.verbose,
            verbose=self.verbose,
        )
    except Exception as e:
        self.logger.error(f"Failed to delete VM '{name}': {e}")
        return {"error": f"Failed to delete VM: {str(e)}"}

    if "error" in outcome:
        self.logger.error(f"Failed to delete VM: {outcome['error']}")
        return outcome

    self.logger.info(f"Successfully deleted VM '{name}'")
    return outcome
async def update_vm(self, name: str, update_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
    """Update VM configuration.

    Args:
        name: Name of the VM to update
        update_opts: Dictionary of configuration changes to apply
        storage: Optional storage path override

    Returns:
        Dictionary with update status as returned by the Lume API
    """
    # NOTE(review): the storage override is accepted but not forwarded to
    # _lume_api_update — confirm whether that is intentional.
    return self._lume_api_update(name, update_opts, debug=self.verbose)
async def get_ip(self, name: str, storage: Optional[str] = None, retry_delay: int = 2) -> str:
    """Get the IP address of a VM, waiting indefinitely until it's available.

    Polls the provider's VM info until a usable IP appears; never gives up.

    Args:
        name: Name of the VM to get the IP for
        storage: Optional storage path override
        retry_delay: Delay between retries in seconds (default: 2)

    Returns:
        IP address of the VM when it becomes available
    """
    attempts = 0
    while True:
        attempts += 1
        # Only announce retries, not the very first lookup.
        if attempts > 1:
            self.logger.info(f"Waiting for VM {name} IP address (attempt {attempts})...")

        try:
            info = await self.get_vm(name, storage=storage)

            # Accept only a real address — reject placeholders.
            candidate = info.get("ip_address", None)
            if candidate and candidate != "unknown" and not candidate.startswith("0.0.0.0"):
                self.logger.info(f"Got valid VM IP address: {candidate}")
                return candidate

            status = info.get("status", "unknown")
            if status == "running":
                self.logger.info("VM is running but no valid IP address yet. Waiting...")
            else:
                self.logger.info(f"VM is not running yet (status: {status}). Waiting...")
        except Exception as e:
            # Transient lookup errors are expected while the VM boots.
            self.logger.warning(f"Error getting VM {name} IP: {e}, continuing to wait...")

        await asyncio.sleep(retry_delay)

        # Periodic heartbeat so long waits remain visible in the logs.
        if attempts % 10 == 0:
            self.logger.info(f"Still waiting for VM {name} IP after {attempts} attempts...")

View File

@@ -168,9 +168,14 @@ def lume_api_run(
payload["storage"] = run_opts["storage"]
# Add shared directories if specified
if "shared_directories" in run_opts:
if "shared_directories" in run_opts and run_opts["shared_directories"]:
payload["sharedDirectories"] = run_opts["shared_directories"]
# Log the payload for debugging
if debug or verbose:
print(f"DEBUG: Payload for {vm_name} run request: {json.dumps(payload, indent=2)}")
logger.debug(f"API payload: {json.dumps(payload, indent=2)}")
# Construct the curl command
cmd = [
"curl", "--connect-timeout", "30", "--max-time", "30",
@@ -347,6 +352,180 @@ def lume_api_update(
return {"error": f"Failed to execute update request: {str(e)}"}
def lume_api_pull(
    image: str,
    name: str,
    host: str,
    port: int,
    storage: Optional[str] = None,
    registry: str = "ghcr.io",
    organization: str = "trycua",
    debug: bool = False,
    verbose: bool = False
) -> Dict[str, Any]:
    """Pull a VM image from a registry using curl.

    Args:
        image: Name/tag of the image to pull
        name: Name to give the VM after pulling
        host: API host
        port: API port
        storage: Storage path for the VM
        registry: Registry to pull from (default: ghcr.io)
        organization: Organization in registry (default: trycua)
        debug: Whether to show debug output
        verbose: Enable verbose logging

    Returns:
        Dictionary with pull status and information
    """
    # Request body for the Lume /pull endpoint.
    payload: Dict[str, Any] = {
        "image": image,
        "name": name,
        "registry": registry,
        "organization": organization,
    }
    if storage:
        payload["storage"] = storage

    # Silence curl's progress output unless verbose logging was requested.
    cmd = ["curl"]
    if not verbose:
        cmd.append("-s")
    cmd += [
        "-X", "POST",
        "-H", "Content-Type: application/json",
        "-d", json.dumps(payload),
        f"http://{host}:{port}/lume/pull",
    ]

    if debug or verbose:
        print(f"DEBUG: Executing curl API call: {' '.join(cmd)}")
        logger.debug(f"Executing API request: {' '.join(cmd)}")

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True)
    except subprocess.SubprocessError as e:
        error_msg = f"Failed to execute pull command: {str(e)}"
        logger.error(error_msg)
        return {"error": error_msg}

    if proc.returncode != 0:
        error_msg = f"Failed to pull VM {name}: {proc.stderr}"
        logger.error(error_msg)
        return {"error": error_msg}

    try:
        parsed = json.loads(proc.stdout)
    except json.JSONDecodeError:
        # Non-JSON output still indicates the pull request was accepted.
        if proc.stdout:
            logger.info(f"Pull response: {proc.stdout}")
        return {"success": True, "message": f"Successfully initiated pull for VM {name}"}

    logger.info(f"Successfully initiated pull for VM {name}")
    return parsed
def lume_api_delete(
    vm_name: str,
    host: str,
    port: int,
    storage: Optional[str] = None,
    debug: bool = False,
    verbose: bool = False
) -> Dict[str, Any]:
    """Delete a VM using curl.

    Args:
        vm_name: Name of the VM to delete
        host: API host
        port: API port
        storage: Storage path for the VM
        debug: Whether to show debug output
        verbose: Enable verbose logging

    Returns:
        Dictionary with API response or error information
    """
    # URL encode the storage parameter for the query string.
    storage_param = ""
    if storage:
        storage_param = f"?storage={urllib.parse.quote(storage, safe='')}"

    # Construct API URL with encoded storage parameter if needed.
    api_url = f"http://{host}:{port}/lume/vms/{vm_name}{storage_param}"

    # Very long timeouts to match the shell implementation this replaced.
    # NOTE(review): --connect-timeout (6000) exceeds --max-time (5000),
    # which looks inverted — confirm the intended values.
    cmd = [
        "curl", "--connect-timeout", "6000", "--max-time", "5000",
        "-s", "-X", "DELETE", api_url,
    ]

    # Only print the curl command when debug is enabled.
    display_curl_string = ' '.join(cmd)
    if debug or verbose:
        print(f"DEBUG: Executing curl API call: {display_curl_string}")
        logger.debug(f"Executing API request: {display_curl_string}")

    try:
        # Pass the argument list directly (shell=False). The previous
        # implementation joined the command into one string and ran it with
        # shell=True "to handle URLs with special characters" — the list
        # form already passes the URL to curl verbatim, and it removes the
        # word-splitting/injection hazard for an unencoded vm_name.
        result = subprocess.run(cmd, capture_output=True, text=True)

        # Handle curl exit codes.
        if result.returncode != 0:
            curl_error = "Unknown error"

            # Map common curl error codes to helpful messages.
            if result.returncode == 7:
                curl_error = "Failed to connect to the API server - it might still be starting up"
            elif result.returncode == 22:
                curl_error = "HTTP error returned from API server"
            elif result.returncode == 28:
                curl_error = "Operation timeout - the API server is taking too long to respond"
            elif result.returncode == 52:
                curl_error = "Empty reply from server - the API server is starting but not fully ready yet"
            elif result.returncode == 56:
                curl_error = "Network problem during data transfer - check container networking"

            # Only log at debug level to reduce noise during retries.
            logger.debug(f"API request failed with code {result.returncode}: {curl_error}")

            # Return a more useful error message.
            return {
                "error": f"API request failed: {curl_error}",
                "curl_code": result.returncode,
                "vm_name": vm_name,
                "storage": storage
            }

        # Try to parse the response as JSON.
        if result.stdout and result.stdout.strip():
            try:
                response = json.loads(result.stdout)
                return response
            except json.JSONDecodeError:
                # Return the raw response if it's not valid JSON.
                return {"success": True, "message": "VM deleted successfully", "raw_response": result.stdout}
        else:
            return {"success": True, "message": "VM deleted successfully"}
    except subprocess.SubprocessError as e:
        logger.error(f"Failed to execute delete request: {e}")
        return {"error": f"Failed to execute delete request: {str(e)}"}
def parse_memory(memory_str: str) -> int:
"""Parse memory string to MB integer.

View File

@@ -44,7 +44,7 @@ class LumierProvider(BaseVMProvider):
def __init__(
self,
port: Optional[int] = 3000,
port: Optional[int] = 7777,
host: str = "localhost",
storage: Optional[str] = None,
shared_path: Optional[str] = None,
@@ -56,7 +56,7 @@ class LumierProvider(BaseVMProvider):
"""Initialize the Lumier VM Provider.
Args:
port: Port for the API server (default: 3000)
port: Port for the API server (default: 7777)
host: Hostname for the API server (default: localhost)
storage: Path for persistent VM storage
shared_path: Path for shared folder between host and VM
@@ -66,8 +66,8 @@ class LumierProvider(BaseVMProvider):
noVNC_port: Specific port for noVNC interface (default: 8006)
"""
self.host = host
# Always ensure api_port has a valid value (3000 is the default)
self.api_port = 3000 if port is None else port
# Always ensure api_port has a valid value (7777 is the default)
self.api_port = 7777 if port is None else port
self.vnc_port = noVNC_port # User-specified noVNC port, will be set in run_vm if provided
self.ephemeral = ephemeral
@@ -78,7 +78,7 @@ class LumierProvider(BaseVMProvider):
self.storage = storage
self.shared_path = shared_path
self.vm_image = image # Store the VM image name to use
self.image = image # Store the VM image name to use
# The container_name will be set in run_vm using the VM name
self.verbose = verbose
self._container_id = None
@@ -270,10 +270,11 @@ class LumierProvider(BaseVMProvider):
logger.error(f"Failed to list VMs: {e}")
return []
async def run_vm(self, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
async def run_vm(self, image: str, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
"""Run a VM with the given options.
Args:
image: Name/tag of the image to use
name: Name of the VM to run (used for the container name and Docker image tag)
run_opts: Options for running the VM, including:
- cpu: Number of CPU cores
@@ -284,7 +285,7 @@ class LumierProvider(BaseVMProvider):
Dictionary with VM status information
"""
# Set the container name using the VM name for consistency
self.container_name = name or "lumier1-vm"
self.container_name = name
try:
# First, check if container already exists and remove it
try:
@@ -341,11 +342,11 @@ class LumierProvider(BaseVMProvider):
# Add environment variables
# Always use the container_name as the VM_NAME for consistency
# Use the VM image passed from the Computer class
print(f"Using VM image: {self.vm_image}")
print(f"Using VM image: {self.image}")
cmd.extend([
"-e", f"VM_NAME={self.container_name}",
"-e", f"VERSION=ghcr.io/trycua/{self.vm_image}",
"-e", f"VERSION=ghcr.io/trycua/{self.image}",
"-e", f"CPU_CORES={run_opts.get('cpu', '4')}",
"-e", f"RAM_SIZE={memory_mb}",
])
@@ -390,7 +391,43 @@ class LumierProvider(BaseVMProvider):
# Container started, now check VM status with polling
print("Container started, checking VM status...")
print("NOTE: This may take some time while the VM image is being pulled and initialized")
print("TIP: You can run 'lume logs -f' in another terminal to see the detailed initialization progress")
# Start a background thread to show container logs in real-time
import threading
def show_container_logs():
# Give the container a moment to start generating logs
time.sleep(1)
print(f"\n---- CONTAINER LOGS FOR '{name}' (LIVE) ----")
print("Showing logs as they are generated. Press Ctrl+C to stop viewing logs...\n")
try:
# Use docker logs with follow option
log_cmd = ["docker", "logs", "--tail", "30", "--follow", name]
process = subprocess.Popen(log_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
text=True, bufsize=1, universal_newlines=True)
# Read and print logs line by line
for line in process.stdout:
print(line, end='')
# Break if process has exited
if process.poll() is not None:
break
except Exception as e:
print(f"\nError showing container logs: {e}")
if self.verbose:
logger.error(f"Error in log streaming thread: {e}")
finally:
print("\n---- LOG STREAMING ENDED ----")
# Make sure process is terminated
if 'process' in locals() and process.poll() is None:
process.terminate()
# Start log streaming in a background thread if verbose mode is enabled
log_thread = threading.Thread(target=show_container_logs)
log_thread.daemon = True # Thread will exit when main program exits
log_thread.start()
# Skip waiting for container readiness and just poll get_vm directly
# Poll the get_vm method indefinitely until the VM is ready with an IP address
@@ -680,6 +717,188 @@ class LumierProvider(BaseVMProvider):
"""Not implemented for Lumier provider."""
logger.warning("update_vm is not implemented for Lumier provider")
return {"name": name, "status": "unchanged"}
async def get_logs(self, name: str, num_lines: int = 100, follow: bool = False, timeout: Optional[int] = None) -> str:
    """Get the logs from the Lumier container.

    Args:
        name: Name of the VM/container to get logs for
        num_lines: Number of recent log lines to return (default: 100)
        follow: If True, follow the logs (stream new logs as they are generated)
        timeout: Optional timeout in seconds for follow mode (None means no timeout)

    Returns:
        Container logs as a string in non-follow mode. In follow mode the logs
        are streamed to the console and a short status string is returned.

    Note:
        If follow=True, this function will continuously stream logs until timeout
        or until interrupted. The output will be printed to console in real-time.
    """
    if not HAS_LUMIER:
        error_msg = "Docker is not available. Cannot get container logs."
        logger.error(error_msg)
        return error_msg

    # The container is named after the VM for consistency with run_vm.
    container_name = name

    # Verify the container exists before asking Docker for its logs, so we can
    # return a clear error message instead of docker's raw stderr.
    try:
        inspect_cmd = ["docker", "container", "inspect", container_name]
        result = subprocess.run(inspect_cmd, capture_output=True, text=True)
        if result.returncode != 0:
            error_msg = f"Container '{container_name}' does not exist or is not accessible"
            logger.error(error_msg)
            return error_msg
    except Exception as e:
        error_msg = f"Error checking container status: {str(e)}"
        logger.error(error_msg)
        return error_msg

    # Base docker logs command; --tail limits output to the most recent lines.
    log_cmd = ["docker", "logs", "--tail", str(num_lines)]

    if follow:
        # Follow mode: stream logs straight to the console (no capture).
        log_cmd.extend(["--follow", container_name])
        if timeout is not None:
            logger.info(f"Following logs for container '{container_name}' with timeout {timeout}s")
        else:
            logger.info(f"Following logs for container '{container_name}' indefinitely")
        print(f"\n---- CONTAINER LOGS FOR '{container_name}' (LIVE) ----")
        print("Press Ctrl+C to stop following logs\n")

        process = None
        try:
            process = subprocess.Popen(log_cmd, text=True)
            # BUGFIX: compare against None (not truthiness) so timeout=0 is
            # honored instead of silently waiting forever.
            if timeout is not None:
                try:
                    process.wait(timeout=timeout)
                except subprocess.TimeoutExpired:
                    process.terminate()  # Stop streaming once the timeout elapses
                    print(f"\n---- LOG FOLLOWING STOPPED (timeout {timeout}s reached) ----")
            else:
                # Without a timeout, block until the user interrupts.
                process.wait()
            return "Logs were displayed to console in follow mode"
        except KeyboardInterrupt:
            if process is not None:
                process.terminate()
            print("\n---- LOG FOLLOWING STOPPED (user interrupted) ----")
            return "Logs were displayed to console in follow mode (interrupted)"
    else:
        # Non-follow mode: capture and return the logs as a string.
        log_cmd.append(container_name)
        logger.info(f"Getting {num_lines} log lines for container '{container_name}'")
        try:
            result = subprocess.run(log_cmd, capture_output=True, text=True, check=True)
            logs = result.stdout
            # Only print a header and the logs if there is any content.
            if logs.strip():
                print(f"\n---- CONTAINER LOGS FOR '{container_name}' (LAST {num_lines} LINES) ----\n")
                print(logs)
                print(f"\n---- END OF LOGS ----")
            else:
                print(f"\nNo logs available for container '{container_name}'")
            return logs
        except subprocess.CalledProcessError as e:
            error_msg = f"Error getting logs: {e.stderr}"
            logger.error(error_msg)
            return error_msg
        except Exception as e:
            error_msg = f"Unexpected error getting logs: {str(e)}"
            logger.error(error_msg)
            return error_msg
async def get_ip(self, name: str, storage: Optional[str] = None, retry_delay: int = 2) -> str:
    """Get the IP address of a VM, waiting indefinitely until it's available.

    Polls get_vm() in a loop with retry_delay seconds between attempts and
    never gives up — callers should wrap this in their own timeout if needed.

    Args:
        name: Name of the VM to get the IP for
        storage: Optional storage path override (forwarded to get_vm)
        retry_delay: Delay between retries in seconds (default: 2)

    Returns:
        IP address of the VM when it becomes available
    """
    # Use container_name = name for consistency with run_vm.
    self.container_name = name
    # Track total attempts for logging purposes.
    total_attempts: int = 0
    # Loop indefinitely until we get a valid IP.
    while True:
        total_attempts += 1
        # Log retry message but not on first attempt.
        if total_attempts > 1:
            logger.info(f"Waiting for VM {name} IP address (attempt {total_attempts})...")
        try:
            # Get VM information from the provider.
            vm_info = await self.get_vm(name, storage=storage)
            # A usable IP must be non-empty, not the "unknown" placeholder,
            # and not an unassigned 0.0.0.0 address.
            ip = vm_info.get("ip_address", None)
            if ip and ip != "unknown" and not ip.startswith("0.0.0.0"):
                logger.info(f"Got valid VM IP address: {ip}")
                return ip
            # Check the VM status to decide which wait message to log.
            status = vm_info.get("status", "unknown")
            # Special handling for Lumier: it may report "stopped" even when the VM is starting.
            # If the VM information contains an IP but status is stopped, it might be a race condition.
            # NOTE(review): the inner return below appears unreachable — the same
            # validity test on ip already returned above. Confirm before removing.
            if status == "stopped" and "ip_address" in vm_info:
                ip = vm_info.get("ip_address")
                if ip and ip != "unknown" and not ip.startswith("0.0.0.0"):
                    logger.info(f"Found valid IP {ip} despite VM status being {status}")
                    return ip
                logger.info(f"VM status is {status}, but still waiting for IP to be assigned")
            # If VM is not running yet, log and wait.
            elif status != "running":
                logger.info(f"VM is not running yet (status: {status}). Waiting...")
            # If VM is running but no IP yet, wait and retry.
            else:
                logger.info("VM is running but no valid IP address yet. Waiting...")
        except Exception as e:
            # Transient get_vm failures are expected while the VM boots;
            # log and keep polling rather than propagating.
            logger.warning(f"Error getting VM {name} IP: {e}, continuing to wait...")
        # Wait before next retry.
        await asyncio.sleep(retry_delay)
        # Add progress log every 10 attempts.
        if total_attempts % 10 == 0:
            logger.info(f"Still waiting for VM {name} IP after {total_attempts} attempts...")
async def __aenter__(self):
"""Async context manager entry.

View File

@@ -72,7 +72,7 @@ class QEMUProvider(BaseVMProvider):
"""List all available VMs."""
raise NotImplementedError("QEMU provider is not implemented yet")
async def run_vm(self, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
async def run_vm(self, image: str, name: str, run_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
"""Run a VM with the given options."""
raise NotImplementedError("QEMU provider is not implemented yet")
@@ -83,3 +83,7 @@ class QEMUProvider(BaseVMProvider):
async def update_vm(self, name: str, update_opts: Dict[str, Any], storage: Optional[str] = None) -> Dict[str, Any]:
    """Update VM configuration.

    Not supported by the QEMU provider; always raises NotImplementedError.
    """
    raise NotImplementedError("QEMU provider is not implemented yet")
async def get_ip(self, name: str, storage: Optional[str] = None, retry_delay: int = 2) -> str:
    """Get the IP address of a VM, waiting indefinitely until it's available.

    Not supported by the QEMU provider; always raises NotImplementedError.
    """
    raise NotImplementedError("QEMU provider is not implemented yet")

View File

@@ -136,7 +136,7 @@ Command Options:
set <boolean> Enable or disable image caching
serve:
--port <port> Port to listen on (default: 3000)
--port <port> Port to listen on (default: 7777)
```
## Install
@@ -178,7 +178,7 @@ For additional disk space, resize the VM disk after pulling the image using the
## Local API Server
`lume` exposes a local HTTP API server that listens on `http://localhost:3000/lume`, enabling automated management of VMs.
`lume` exposes a local HTTP API server that listens on `http://localhost:7777/lume`, enabling automated management of VMs.
```bash
lume serve

View File

@@ -18,7 +18,7 @@ curl --connect-timeout 6000 \
"ipsw": "latest",
"storage": "ssd"
}' \
http://localhost:3000/lume/vms
http://localhost:7777/lume/vms
```
</details>
@@ -30,7 +30,7 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
http://localhost:3000/lume/vms/my-vm-name/run
http://localhost:7777/lume/vms/my-vm-name/run
# Run with VNC client started and shared directory
curl --connect-timeout 6000 \
@@ -48,7 +48,7 @@ curl --connect-timeout 6000 \
"recoveryMode": false,
"storage": "ssd"
}' \
http://localhost:3000/lume/vms/lume_vm/run
http://localhost:7777/lume/vms/lume_vm/run
```
</details>
@@ -58,7 +58,7 @@ curl --connect-timeout 6000 \
```bash
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/vms
http://localhost:7777/lume/vms
```
```
[
@@ -89,12 +89,12 @@ curl --connect-timeout 6000 \
# Basic get
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/vms/lume_vm
http://localhost:7777/lume/vms/lume_vm
# Get with storage location specified
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/vms/lume_vm?storage=ssd
http://localhost:7777/lume/vms/lume_vm?storage=ssd
```
```
{
@@ -122,7 +122,7 @@ curl --connect-timeout 6000 \
"diskSize": "128GB",
"storage": "ssd"
}' \
http://localhost:3000/lume/vms/my-vm-name
http://localhost:7777/lume/vms/my-vm-name
```
</details>
@@ -134,13 +134,13 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
http://localhost:3000/lume/vms/my-vm-name/stop
http://localhost:7777/lume/vms/my-vm-name/stop
# Stop with storage location specified
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
http://localhost:3000/lume/vms/my-vm-name/stop?storage=ssd
http://localhost:7777/lume/vms/my-vm-name/stop?storage=ssd
```
</details>
@@ -152,13 +152,13 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X DELETE \
http://localhost:3000/lume/vms/my-vm-name
http://localhost:7777/lume/vms/my-vm-name
# Delete with storage location specified
curl --connect-timeout 6000 \
--max-time 5000 \
-X DELETE \
http://localhost:3000/lume/vms/my-vm-name?storage=ssd
http://localhost:7777/lume/vms/my-vm-name?storage=ssd
```
</details>
@@ -177,7 +177,7 @@ curl --connect-timeout 6000 \
"organization": "trycua",
"storage": "ssd"
}' \
http://localhost:3000/lume/pull
http://localhost:7777/lume/pull
```
```bash
@@ -189,7 +189,7 @@ curl --connect-timeout 6000 \
"image": "macos-sequoia-vanilla:15.2",
"name": "macos-sequoia-vanilla"
}' \
http://localhost:3000/lume/pull
http://localhost:7777/lume/pull
```
</details>
@@ -211,7 +211,7 @@ curl --connect-timeout 6000 \
"chunkSizeMb": 512,
"storage": null
}' \
http://localhost:3000/lume/vms/push
http://localhost:7777/lume/vms/push
```
**Response (202 Accepted):**
@@ -243,7 +243,7 @@ curl --connect-timeout 6000 \
"sourceLocation": "default",
"destLocation": "ssd"
}' \
http://localhost:3000/lume/vms/clone
http://localhost:7777/lume/vms/clone
```
</details>
@@ -253,7 +253,7 @@ curl --connect-timeout 6000 \
```bash
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/ipsw
http://localhost:7777/lume/ipsw
```
</details>
@@ -264,7 +264,7 @@ curl --connect-timeout 6000 \
# List images with default organization (trycua)
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/images
http://localhost:7777/lume/images
```
```json
@@ -284,7 +284,7 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
http://localhost:3000/lume/prune
http://localhost:7777/lume/prune
```
</details>
@@ -294,7 +294,7 @@ curl --connect-timeout 6000 \
```bash
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/config
http://localhost:7777/lume/config
```
```json
@@ -319,7 +319,7 @@ curl --connect-timeout 6000 \
"cacheDirectory": "~/custom/lume/cache",
"cachingEnabled": true
}' \
http://localhost:3000/lume/config
http://localhost:7777/lume/config
```
</details>
@@ -329,7 +329,7 @@ curl --connect-timeout 6000 \
```bash
curl --connect-timeout 6000 \
--max-time 5000 \
http://localhost:3000/lume/config/locations
http://localhost:7777/lume/config/locations
```
```json
@@ -360,7 +360,7 @@ curl --connect-timeout 6000 \
"name": "ssd",
"path": "/Volumes/SSD/lume/vms"
}' \
http://localhost:3000/lume/config/locations
http://localhost:7777/lume/config/locations
```
</details>
@@ -371,7 +371,7 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X DELETE \
http://localhost:3000/lume/config/locations/ssd
http://localhost:7777/lume/config/locations/ssd
```
</details>
@@ -382,6 +382,6 @@ curl --connect-timeout 6000 \
curl --connect-timeout 6000 \
--max-time 5000 \
-X POST \
http://localhost:3000/lume/config/locations/default/ssd
http://localhost:7777/lume/config/locations/default/ssd
```
</details>

View File

@@ -33,8 +33,8 @@ LATEST_RELEASE_URL="https://api.github.com/repos/$GITHUB_REPO/releases/latest"
# Option to skip background service setup (default: install it)
INSTALL_BACKGROUND_SERVICE=true
# Default port for lume serve (default: 3000)
LUME_PORT=3000
# Default port for lume serve (default: 7777)
LUME_PORT=7777
# Parse command line arguments
while [ "$#" -gt 0 ]; do
@@ -56,14 +56,14 @@ while [ "$#" -gt 0 ]; do
echo ""
echo "Options:"
echo " --install-dir DIR Install to the specified directory (default: $DEFAULT_INSTALL_DIR)"
echo " --port PORT Specify the port for lume serve (default: 3000)"
echo " --port PORT Specify the port for lume serve (default: 7777)"
echo " --no-background-service Do not setup the Lume background service (LaunchAgent)"
echo " --help Display this help message"
echo ""
echo "Examples:"
echo " $0 # Install to $DEFAULT_INSTALL_DIR and setup background service"
echo " $0 --install-dir=/usr/local/bin # Install to system directory (may require root privileges)"
echo " $0 --port 3001 # Use port 3001 instead of the default 3000"
echo " $0 --port 7778 # Use port 7778 instead of the default 7777"
echo " $0 --no-background-service # Install without setting up the background service"
echo " INSTALL_DIR=/opt/lume $0 # Install to /opt/lume (legacy env var support)"
exit 0

View File

@@ -7,7 +7,7 @@ struct Serve: AsyncParsableCommand {
)
@Option(help: "Port to listen on")
var port: UInt16 = 3000
var port: UInt16 = 7777
func run() async throws {
let server = await Server(port: port)

View File

@@ -68,7 +68,7 @@ final class Server {
private var routes: [Route]
// MARK: - Initialization
init(port: UInt16 = 3000) {
init(port: UInt16 = 7777) {
self.port = NWEndpoint.Port(rawValue: port)!
self.controller = LumeController()
self.routes = []

View File

@@ -36,7 +36,7 @@ Before using Lumier, make sure you have:
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)"
```
After installation, Lume runs as a background service and listens on port 3000. This service allows Lumier to create and manage virtual machines. If port 3000 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script.
After installation, Lume runs as a background service and listens on port 7777. This service allows Lumier to create and manage virtual machines. If port 7777 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script.
## How It Works

View File

@@ -127,7 +127,7 @@ lume_get() {
local debug="${4:-false}"
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# URL encode the storage path for the query parameter
# Replace special characters with their URL encoded equivalents
@@ -175,7 +175,7 @@ lume_set() {
local display="${5:-1024x768}"
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# Handle memory format for the API
if [[ "$memory" == *"GB"* ]]; then
@@ -258,7 +258,7 @@ lume_stop() {
local storage="$2"
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# Only log in debug mode
if [[ "$LUMIER_DEBUG" == "1" ]]; then
@@ -297,7 +297,7 @@ lume_pull() {
local organization="${5:-trycua}" # Organization, default is trycua
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# Mark that pull is in progress for interrupt handling
export PULL_IN_PROGRESS=1
@@ -394,7 +394,7 @@ lume_run() {
done
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# Only log in debug mode
if [[ "$LUMIER_DEBUG" == "1" ]]; then
@@ -446,7 +446,7 @@ lume_delete() {
local storage="$2"
local api_host="${LUME_API_HOST:-host.docker.internal}"
local api_port="${LUME_API_PORT:-3000}"
local api_port="${LUME_API_PORT:-7777}"
# URL encode the storage path for the query parameter
# Replace special characters with their URL encoded equivalents

View File

@@ -81,7 +81,7 @@
"outputs": [],
"source": [
"async def get_ipsw():\n",
" async with PyLume(port=3000) as pylume:\n",
" async with PyLume(port=7777) as pylume:\n",
" url = await pylume.get_latest_ipsw_url()\n",
" print(f\"Latest IPSW URL: {url}\")\n",
"\n",

View File

@@ -47,7 +47,7 @@ case "$1" in
if [ "$2" == "--interactive" ]; then
print_info "Running the development Docker container with interactive shell..."
print_info "Mounting source code from host"
print_info "Connecting to host.docker.internal:3000"
print_info "Connecting to host.docker.internal:7777"
docker run -it --rm \
--platform=${PLATFORM} \
@@ -64,7 +64,7 @@ case "$1" in
exit 1
fi
print_info "Running example: $2"
print_info "Connecting to host.docker.internal:3000"
print_info "Connecting to host.docker.internal:7777"
docker run -it --rm \
--platform=${PLATFORM} \