diff --git a/.all-contributorsrc b/.all-contributorsrc deleted file mode 100644 index 5969251d..00000000 --- a/.all-contributorsrc +++ /dev/null @@ -1,183 +0,0 @@ -{ - "projectName": "cua", - "projectOwner": "trycua", - "files": [ - "README.md" - ], - "commitType": "docs", - "commitConvention": "angular", - "contributorsPerLine": 7, - "contributors": [ - { - "login": "f-trycua", - "name": "f-trycua", - "avatar_url": "https://avatars.githubusercontent.com/u/195596869?v=4", - "profile": "https://github.com/f-trycua", - "contributions": [ - "code" - ] - }, - { - "login": "pepicrft", - "name": "Pedro Piñera Buendía", - "avatar_url": "https://avatars.githubusercontent.com/u/663605?v=4", - "profile": "http://pepicrft.me", - "contributions": [ - "code" - ] - }, - { - "login": "aktech", - "name": "Amit Kumar", - "avatar_url": "https://avatars.githubusercontent.com/u/5647941?v=4", - "profile": "https://iamit.in", - "contributions": [ - "code" - ] - }, - { - "login": "jellydn", - "name": "Dung Duc Huynh (Kaka)", - "avatar_url": "https://avatars.githubusercontent.com/u/870029?v=4", - "profile": "https://productsway.com/", - "contributions": [ - "code" - ] - }, - { - "login": "ShrootBuck", - "name": "Zayd Krunz", - "avatar_url": "https://avatars.githubusercontent.com/u/70227235?v=4", - "profile": "http://zaydkrunz.com", - "contributions": [ - "code" - ] - }, - { - "login": "PrashantRaj18198", - "name": "Prashant Raj", - "avatar_url": "https://avatars.githubusercontent.com/u/23168997?v=4", - "profile": "https://github.com/PrashantRaj18198", - "contributions": [ - "code" - ] - }, - { - "login": "Leland-Takamine", - "name": "Leland Takamine", - "avatar_url": "https://avatars.githubusercontent.com/u/847683?v=4", - "profile": "https://www.mobile.dev", - "contributions": [ - "code" - ] - }, - { - "login": "ddupont808", - "name": "ddupont", - "avatar_url": "https://avatars.githubusercontent.com/u/3820588?v=4", - "profile": "https://github.com/ddupont808", - "contributions": [ - 
"code" - ] - }, - { - "login": "Lizzard1123", - "name": "Ethan Gutierrez", - "avatar_url": "https://avatars.githubusercontent.com/u/46036335?v=4", - "profile": "https://github.com/Lizzard1123", - "contributions": [ - "code" - ] - }, - { - "login": "RicterZ", - "name": "Ricter Zheng", - "avatar_url": "https://avatars.githubusercontent.com/u/5282759?v=4", - "profile": "https://ricterz.me", - "contributions": [ - "code" - ] - }, - { - "login": "rahulkarajgikar", - "name": "Rahul Karajgikar", - "avatar_url": "https://avatars.githubusercontent.com/u/50844303?v=4", - "profile": "https://www.trytruffle.ai/", - "contributions": [ - "code" - ] - }, - { - "login": "trospix", - "name": "trospix", - "avatar_url": "https://avatars.githubusercontent.com/u/81363696?v=4", - "profile": "https://github.com/trospix", - "contributions": [ - "code" - ] - }, - { - "login": "eltociear", - "name": "Ikko Eltociear Ashimine", - "avatar_url": "https://avatars.githubusercontent.com/u/22633385?v=4", - "profile": "https://wavee.world/invitation/b96d00e6-b802-4a1b-8a66-2e3854a01ffd", - "contributions": [ - "code" - ] - }, - { - "login": "dp221125", - "name": "한석호(MilKyo)", - "avatar_url": "https://avatars.githubusercontent.com/u/10572119?v=4", - "profile": "https://github.com/dp221125", - "contributions": [ - "code" - ] - }, - { - "login": "rahimnathwani", - "name": "Rahim Nathwani", - "avatar_url": "https://avatars.githubusercontent.com/u/891558?v=4", - "profile": "https://www.encona.com/", - "contributions": [ - "code" - ] - }, - { - "login": "mjspeck", - "name": "Matt Speck", - "avatar_url": "https://avatars.githubusercontent.com/u/20689127?v=4", - "profile": "https://mjspeck.github.io/", - "contributions": [ - "code" - ] - }, - { - "login": "FinnBorge", - "name": "FinnBorge", - "avatar_url": "https://avatars.githubusercontent.com/u/9272726?v=4", - "profile": "https://github.com/FinnBorge", - "contributions": [ - "code" - ] - }, - { - "login": "jklapacz", - "name": "Jakub Klapacz", - 
"avatar_url": "https://avatars.githubusercontent.com/u/5343758?v=4", - "profile": "https://github.com/jklapacz", - "contributions": [ - "code" - ] - }, - { - "login": "evnsnclr", - "name": "Evan smith", - "avatar_url": "https://avatars.githubusercontent.com/u/139897548?v=4", - "profile": "https://github.com/evnsnclr", - "contributions": [ - "code" - ] - } - ] -} diff --git a/.github/workflows/pypi-publish-pylume.yml b/.github/workflows/pypi-publish-pylume.yml deleted file mode 100644 index 91278c00..00000000 --- a/.github/workflows/pypi-publish-pylume.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Publish Pylume Package - -on: - push: - tags: - - "pylume-v*" - workflow_dispatch: - inputs: - version: - description: "Version to publish (without v prefix)" - required: true - default: "0.1.0" - workflow_call: - inputs: - version: - description: "Version to publish" - required: true - type: string - outputs: - version: - description: "The version that was published" - value: ${{ jobs.determine-version.outputs.version }} - -# Adding permissions at workflow level -permissions: - contents: write - -jobs: - determine-version: - runs-on: macos-latest - outputs: - version: ${{ steps.get-version.outputs.version }} - steps: - - uses: actions/checkout@v4 - - - name: Determine version - id: get-version - run: | - if [ "${{ github.event_name }}" == "push" ]; then - # Extract version from tag (for package-specific tags) - if [[ "${{ github.ref }}" =~ ^refs/tags/pylume-v([0-9]+\.[0-9]+\.[0-9]+) ]]; then - VERSION=${BASH_REMATCH[1]} - else - echo "Invalid tag format for pylume" - exit 1 - fi - elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then - # Use version from workflow dispatch - VERSION=${{ github.event.inputs.version }} - else - # Use version from workflow_call - VERSION=${{ inputs.version }} - fi - echo "VERSION=$VERSION" - echo "version=$VERSION" >> $GITHUB_OUTPUT - - validate-version: - runs-on: macos-latest - needs: determine-version - steps: - - uses: 
actions/checkout@v4 - - name: Validate version - id: validate-version - run: | - CODE_VERSION=$(grep '__version__' libs/python/pylume/pylume/__init__.py | cut -d'"' -f2) - if [ "${{ needs.determine-version.outputs.version }}" != "$CODE_VERSION" ]; then - echo "Version mismatch: expected $CODE_VERSION, got ${{ needs.determine-version.outputs.version }}" - exit 1 - fi - echo "Version validated: $CODE_VERSION" - - publish: - needs: determine-version - uses: ./.github/workflows/pypi-reusable-publish.yml - with: - package_name: "pylume" - package_dir: "libs/python/pylume" - version: ${{ needs.determine-version.outputs.version }} - is_lume_package: true - base_package_name: "pylume" - secrets: - PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/pypi-reusable-publish.yml b/.github/workflows/pypi-reusable-publish.yml index 4a220610..614d8a7d 100644 --- a/.github/workflows/pypi-reusable-publish.yml +++ b/.github/workflows/pypi-reusable-publish.yml @@ -4,11 +4,11 @@ on: workflow_call: inputs: package_name: - description: "Name of the package (e.g. pylume, computer, agent)" + description: "Name of the package (e.g. computer, agent)" required: true type: string package_dir: - description: "Directory containing the package relative to workspace root (e.g. libs/python/pylume)" + description: "Directory containing the package relative to workspace root (e.g. libs/python/computer)" required: true type: string version: @@ -21,7 +21,7 @@ on: type: boolean default: false base_package_name: - description: "PyPI package name (e.g. pylume, cua-agent)" + description: "PyPI package name (e.g. 
cua-agent)" required: true type: string make_latest: diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml new file mode 100644 index 00000000..6bfbed5c --- /dev/null +++ b/.github/workflows/python-tests.yml @@ -0,0 +1,93 @@ +name: Python Unit Tests + +on: + pull_request: + paths: + - "libs/python/**" + - ".github/workflows/python-tests.yml" + push: + branches: + - main + paths: + - "libs/python/**" + - ".github/workflows/python-tests.yml" + workflow_dispatch: # Allow manual trigger + +jobs: + test: + name: Test ${{ matrix.package }} + runs-on: ubuntu-latest + + strategy: + fail-fast: false # Test all packages even if one fails + matrix: + package: + - core + - agent + - computer + - computer-server + - mcp-server + - pylume + - som + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install uv + run: | + pip install uv + + - name: Install package and dependencies + run: | + cd libs/python/${{ matrix.package }} + # Install the package in editable mode with dev dependencies + if [ -f pyproject.toml ]; then + uv pip install --system -e . 
+ # Install test dependencies + uv pip install --system pytest pytest-asyncio pytest-mock pytest-cov + fi + shell: bash + + - name: Run tests + run: | + cd libs/python/${{ matrix.package }} + if [ -d tests ]; then + python -m pytest tests/ -v --tb=short --cov --cov-report=term --cov-report=xml + else + echo "No tests directory found, skipping tests" + fi + shell: bash + env: + CUA_TELEMETRY_DISABLED: "1" # Disable telemetry during tests + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + if: always() + with: + file: ./libs/python/${{ matrix.package }}/coverage.xml + flags: ${{ matrix.package }} + name: codecov-${{ matrix.package }} + fail_ci_if_error: false + continue-on-error: true + + summary: + name: Test Summary + runs-on: ubuntu-latest + needs: test + if: always() + + steps: + - name: Check test results + run: | + if [ "${{ needs.test.result }}" == "failure" ]; then + echo "❌ Some tests failed. Please check the logs above." + exit 1 + else + echo "✅ All tests passed!" 
+ fi diff --git a/.github/workflows/test-cua-models.yml b/.github/workflows/test-cua-models.yml new file mode 100644 index 00000000..2fa3f206 --- /dev/null +++ b/.github/workflows/test-cua-models.yml @@ -0,0 +1,372 @@ +name: Test CUA Supporting Models + +# This workflow tests all supported CUA models with API keys +# Run manually using workflow_dispatch with test_models=true + +on: + pull_request_target: + branches: [main, master] + workflow_dispatch: + inputs: + test_models: + description: "Test all supported models (requires API keys)" + required: false + default: true + type: boolean + +jobs: + # Test all CUA models - runs on PRs or when manually triggered + test-all-models: + if: ${{ github.event_name == 'pull_request_target' || fromJSON(inputs.test_models || 'false') }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + model: + # Claude Sonnet/Haiku + # - anthropic/claude-sonnet-4-5-20250929 + - anthropic/claude-haiku-4-5-20251001 + # - anthropic/claude-opus-4-1-20250805 + + # OpenAI CU Preview + - openai/computer-use-preview + + # GLM-V + # - openrouter/z-ai/glm-4.5v + # - huggingface-local/zai-org/GLM-4.5V # Requires local model setup + + # Gemini CU Preview + # - gemini-2.5-computer-use-preview-10-2025 + + # InternVL + # - huggingface-local/OpenGVLab/InternVL3_5-1B + # - huggingface-local/OpenGVLab/InternVL3_5-2B + # - huggingface-local/OpenGVLab/InternVL3_5-4B + # - huggingface-local/OpenGVLab/InternVL3_5-8B + + # UI-TARS (supports full computer-use, can run standalone) + # - huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B + + # Note: OpenCUA, GTA, and Holo are grounding-only models + # They only support predict_click(), not agent.run() + # See composed agents section below for testing them + + # Moondream (typically used in composed agents) + # Format: moondream3+{any-llm-with-tools} + # - moondream3+anthropic/claude-sonnet-4-5-20250929 # Claude has VLM + Tools + # - moondream3+openai/gpt-4o # GPT-4o has VLM + Tools + + # OmniParser 
(typically used in composed agents) + # Format: omniparser+{any-vlm-with-tools} + # - omniparser+anthropic/claude-sonnet-4-5-20250929 # Claude has VLM + Tools + # - omniparser+openai/gpt-4o # GPT-4o has VLM + Tools + + # Other grounding models + VLM with tools + # Format: {grounding-model}+{any-vlm-with-tools} + # These grounding-only models (OpenCUA, GTA, Holo) must be used in composed form + # since they only support predict_click(), not full agent.run() + # - huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929 + # - huggingface-local/xlangai/OpenCUA-7B+anthropic/claude-sonnet-4-5-20250929 + # - huggingface-local/Hcompany/Holo1.5-3B+anthropic/claude-sonnet-4-5-20250929 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up uv and Python + uses: astral-sh/setup-uv@v4 + with: + python-version: "3.12" + + - name: Cache system packages + uses: actions/cache@v4 + with: + path: /var/cache/apt + key: ${{ runner.os }}-apt-${{ hashFiles('**/Dockerfile') }} + restore-keys: | + ${{ runner.os }}-apt- + + - name: Install system dependencies + timeout-minutes: 20 + run: | + sudo apt-get update + sudo apt-get install -y libgl1-mesa-dri libglib2.0-0 + + - name: Cache Python dependencies (uv) + uses: actions/cache@v4 + with: + path: | + ~/.cache/uv + .venv + key: ${{ runner.os }}-uv-${{ hashFiles('pyproject.toml', 'uv.lock', 'libs/python/**/pyproject.toml') }} + restore-keys: | + ${{ runner.os }}-uv- + + - name: Install CUA dependencies (uv) + run: | + # Remove existing venv if it exists (from cache restore) to avoid interactive prompt + rm -rf .venv + uv venv --python 3.12 + uv pip install -e libs/python/agent -e libs/python/computer + uv pip install -e libs/python/core + uv pip install "cua-agent[uitars-hf]" + uv pip install pytest + + - name: Cache HuggingFace models + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface + key: ${{ runner.os }}-hf-models-v1 + restore-keys: | + ${{ runner.os }}-hf-models- + # Large 
cache - models can be several GB each and are reused across runs + + - name: Record test start time + run: echo "TEST_START_TIME=$(date +%s)" >> $GITHUB_ENV + env: + # Ensure HuggingFace uses consistent cache location + HF_HOME: ~/.cache/huggingface + + - name: Test model with agent loop + id: test_model + timeout-minutes: 20 + continue-on-error: true + run: | + cd tests/agent_loop_testing + uv run python agent_test.py --model "${{ matrix.model }}" + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + + - name: Calculate test duration and prepare message + if: always() + run: | + TEST_END_TIME=$(date +%s) + + # Handle case where TEST_START_TIME might not be set + if [ -z "$TEST_START_TIME" ]; then + TEST_START_TIME=$TEST_END_TIME + fi + + TEST_DURATION=$((TEST_END_TIME - TEST_START_TIME)) + + # Convert seconds to minutes and seconds + MINUTES=$((TEST_DURATION / 60)) + SECONDS=$((TEST_DURATION % 60)) + + # Format duration + if [ $MINUTES -gt 0 ]; then + DURATION_STR="${MINUTES}m ${SECONDS}s" + else + DURATION_STR="${SECONDS}s" + fi + + # Determine status icon based on test step outcome + if [ "${{ steps.test_model.outcome }}" == "success" ]; then + STATUS_ICON="✅" + STATUS_TEXT="PASSED" + SLACK_COLOR="#36a64f" + else + STATUS_ICON="❌" + STATUS_TEXT="FAILED" + SLACK_COLOR="#dc3545" + fi + + # Prepare Slack message + echo "TESTS_CONTENT<> $GITHUB_ENV + echo "*CUA Model Test Results*" >> $GITHUB_ENV + echo "" >> $GITHUB_ENV + echo "*Model:* ${{ matrix.model }}" >> $GITHUB_ENV + echo "*Status:* ${STATUS_ICON} ${STATUS_TEXT}" >> $GITHUB_ENV + echo "*Duration:* ${DURATION_STR}" >> $GITHUB_ENV + echo "*Run:* ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + # Set color based on outcome + echo "SLACK_COLOR=${SLACK_COLOR}" >> 
$GITHUB_ENV + + # Save result to JSON file for summary + mkdir -p test_summary + MODEL_NAME="${{ matrix.model }}" + # Sanitize model name for filename + SAFE_MODEL_NAME=$(echo "$MODEL_NAME" | sed 's/[^a-zA-Z0-9]/_/g') + + # Determine pass status + if [ "${{ steps.test_model.outcome }}" == "success" ]; then + PASSED_VAL="true" + else + PASSED_VAL="false" + fi + + # Create JSON file using printf to avoid YAML parsing issues + printf '{\n "model": "%s",\n "status": "%s",\n "status_icon": "%s",\n "duration": "%s",\n "duration_seconds": %d,\n "passed": %s\n}' \ + "${MODEL_NAME}" "${STATUS_TEXT}" "${STATUS_ICON}" "${DURATION_STR}" "${TEST_DURATION}" "${PASSED_VAL}" \ + > "test_summary/${SAFE_MODEL_NAME}.json" + # Expose safe model name for subsequent steps (artifact naming) + echo "SAFE_MODEL_NAME=${SAFE_MODEL_NAME}" >> $GITHUB_ENV + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results-${{ matrix.model }} + path: | + tests/agent_loop_testing/test_images/ + *.log + retention-days: 7 + + - name: Upload test summary data + if: always() + uses: actions/upload-artifact@v4 + with: + # Unique, slash-free artifact name per matrix entry + name: test-summary-${{ env.SAFE_MODEL_NAME }} + path: test_summary/ + retention-days: 1 + + - name: Set default Slack color + if: always() && env.SLACK_COLOR == '' + run: echo "SLACK_COLOR=#36a64f" >> $GITHUB_ENV + + # Individual model notifications disabled - only summary is sent + # - name: Notify Slack with test results + # if: always() + # uses: rtCamp/action-slack-notify@v2 + # env: + # SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + # SLACK_CHANNEL: ${{ vars.SLACK_CHANNEL }} + # SLACK_TITLE: CUA Model Test Update + # SLACK_COLOR: ${{ env.SLACK_COLOR }} + # SLACK_MESSAGE: | + # ${{ env.TESTS_CONTENT }} + + # Summary job that aggregates all model test results + test-summary: + if: ${{ always() && (github.event_name == 'pull_request_target' || fromJSON(inputs.test_models || 'false')) }} + 
needs: test-all-models + runs-on: ubuntu-latest + steps: + - name: Install jq + run: sudo apt-get update && sudo apt-get install -y jq + + - name: Download all test summary artifacts + continue-on-error: true + uses: actions/download-artifact@v4 + with: + pattern: test-summary-* + merge-multiple: true + path: all_summaries + + - name: Generate and send summary + if: always() + shell: bash + run: | + # Create directory if it doesn't exist + mkdir -p all_summaries + + # Get list of models being tested in this run from the matrix + # This helps filter out artifacts from previous runs when testing locally + EXPECTED_MODELS="${{ join(matrix.model, ' ') }}" + + # Aggregate all results + PASSED_COUNT=0 + FAILED_COUNT=0 + TOTAL_DURATION=0 + SUMMARY_MESSAGE="*🚀 Model Summaries*\n\n" + + # Process each JSON file (find all JSON files recursively) + # Save to temp file first to avoid subshell issues + find all_summaries -name "*.json" -type f 2>/dev/null > /tmp/json_files.txt || true + + # Use associative array to deduplicate by model name + declare -A processed_models + + while IFS= read -r json_file; do + if [ -f "$json_file" ]; then + MODEL=$(jq -r '.model' "$json_file") + + # Skip if we've already processed this model + if [ "${processed_models[$MODEL]}" = "1" ]; then + echo "Skipping duplicate model: $MODEL" + continue + fi + + # Filter: Only include models that are in the current matrix + # This prevents including artifacts from previous workflow runs + if [ -n "$EXPECTED_MODELS" ]; then + if ! 
echo "$EXPECTED_MODELS" | grep -q "$MODEL"; then + echo "Skipping model from previous run: $MODEL" + continue + fi + fi + + # Mark as processed + processed_models[$MODEL]="1" + + STATUS_ICON=$(jq -r '.status_icon' "$json_file") + STATUS=$(jq -r '.status' "$json_file") + DURATION=$(jq -r '.duration' "$json_file") + DURATION_SEC=$(jq -r '.duration_seconds' "$json_file") + PASSED=$(jq -r '.passed' "$json_file") + + # Add to summary as clean line format + SUMMARY_MESSAGE="${SUMMARY_MESSAGE}${STATUS_ICON} ${STATUS} - \`${MODEL}\` - ${DURATION}\n" + + if [ "$PASSED" = "true" ]; then + PASSED_COUNT=$((PASSED_COUNT + 1)) + else + FAILED_COUNT=$((FAILED_COUNT + 1)) + fi + TOTAL_DURATION=$((TOTAL_DURATION + DURATION_SEC)) + fi + done < /tmp/json_files.txt + + # Check if we found any results + TOTAL_COUNT=$((PASSED_COUNT + FAILED_COUNT)) + if [ $TOTAL_COUNT -eq 0 ]; then + SUMMARY_MESSAGE="${SUMMARY_MESSAGE}⚠️ No test results found (workflow may have been canceled)\n" + SLACK_COLOR="#ffa500" + else + # Add summary stats + SUMMARY_MESSAGE="${SUMMARY_MESSAGE}\n*Results:* ${PASSED_COUNT} passed, ${FAILED_COUNT} failed out of ${TOTAL_COUNT} models\n" + + # Calculate total duration + TOTAL_MIN=$((TOTAL_DURATION / 60)) + TOTAL_SEC=$((TOTAL_DURATION % 60)) + if [ $TOTAL_MIN -gt 0 ]; then + TOTAL_DURATION_STR="${TOTAL_MIN}m ${TOTAL_SEC}s" + else + TOTAL_DURATION_STR="${TOTAL_SEC}s" + fi + SUMMARY_MESSAGE="${SUMMARY_MESSAGE}*Total Duration:* ${TOTAL_DURATION_STR}\n" + + # Determine color based on results + if [ $FAILED_COUNT -eq 0 ]; then + SLACK_COLOR="#36a64f" + elif [ $PASSED_COUNT -eq 0 ]; then + SLACK_COLOR="#dc3545" + else + SLACK_COLOR="#ffa500" + fi + fi + + SUMMARY_MESSAGE="${SUMMARY_MESSAGE}*Run:* ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + + # Export for use in next step + echo "SUMMARY_MESSAGE<> $GITHUB_ENV + echo -e "${SUMMARY_MESSAGE}" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + echo "SLACK_COLOR=${SLACK_COLOR}" >> $GITHUB_ENV 
+ + - name: Send summary to Slack + if: always() + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: ${{ vars.SLACK_CHANNEL }} + SLACK_TITLE: CUA Models Test Summary + SLACK_COLOR: ${{ env.SLACK_COLOR }} + SLACK_MESSAGE: | + ${{ env.SUMMARY_MESSAGE }} diff --git a/.gitignore b/.gitignore index 8cae22ce..adacb39a 100644 --- a/.gitignore +++ b/.gitignore @@ -259,4 +259,7 @@ storage/ .Trashes .Trash-1000/ -post-provision \ No newline at end of file +post-provision + +# Local secrets for act +.secrets \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e1523f92..d9475d42 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: entry: prettier --write language: node additional_dependencies: ["prettier@3.6.2"] - files: \.(ts|tsx|js|jsx|json|md|yaml|yml)$ + files: \.(ts|tsx|js|jsx|json|md|mdx|yaml|yml)$ - repo: local hooks: diff --git a/.vscode/launch.json b/.vscode/launch.json index acfd84b2..58701566 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -10,7 +10,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -23,7 +23,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": 
"${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -36,7 +36,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -49,20 +49,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" - } - }, - { - "name": "Run PyLume Examples", - "type": "debugpy", - "request": "launch", - "program": "examples/pylume_examples.py", - "console": "integratedTerminal", - "justMyCode": true, - "python": "${workspaceFolder:cua-root}/.venv/bin/python", - "cwd": "${workspaceFolder:cua-root}", - "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": 
"${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -84,7 +71,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -106,7 +93,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { @@ -119,7 +106,7 @@ "python": "${workspaceFolder:cua-root}/.venv/bin/python", "cwd": "${workspaceFolder:cua-root}", "env": { - "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som:${workspaceFolder:cua-root}/libs/python/pylume" + "PYTHONPATH": "${workspaceFolder:cua-root}/libs/python/core:${workspaceFolder:cua-root}/libs/python/computer:${workspaceFolder:cua-root}/libs/python/agent:${workspaceFolder:cua-root}/libs/python/som" } }, { diff --git a/.vscode/py.code-workspace 
b/.vscode/py.code-workspace index 25324251..adb04695 100644 --- a/.vscode/py.code-workspace +++ b/.vscode/py.code-workspace @@ -20,10 +20,6 @@ "name": "computer-server", "path": "../libs/python/computer-server" }, - { - "name": "pylume", - "path": "../libs/python/pylume" - }, { "name": "core", "path": "../libs/python/core" @@ -51,7 +47,6 @@ "${workspaceFolder:cua-root}/libs/python/computer", "${workspaceFolder:cua-root}/libs/python/agent", "${workspaceFolder:cua-root}/libs/python/som", - "${workspaceFolder:cua-root}/libs/python/pylume", "${workspaceFolder:cua-root}/.vscode/typings" ], "python.envFile": "${workspaceFolder:cua-root}/.env", @@ -89,10 +84,6 @@ "name": "som", "depth": 2 }, - { - "name": "pylume", - "depth": 2 - }, { "name": "core", "depth": 2 @@ -103,7 +94,6 @@ "${workspaceFolder:cua-root}/libs/python/computer", "${workspaceFolder:cua-root}/libs/python/agent", "${workspaceFolder:cua-root}/libs/python/som", - "${workspaceFolder:cua-root}/libs/python/pylume" ], "python.languageServer": "None", "[python]": { diff --git a/.vscode/settings.json b/.vscode/settings.json index ab4deb49..c06f93aa 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,6 +1,6 @@ { "python-envs.pythonProjects": [], - "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", + "python.defaultInterpreterPath": "${workspaceFolder}/.venv", "editor.formatOnSave": true, "editor.codeActionsOnSave": { "source.organizeImports": "explicit", diff --git a/Dockerfile b/Dockerfile index 9b9f3c47..579842a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ENV PYTHONUNBUFFERED=1 \ PYTHONDONTWRITEBYTECODE=1 \ PIP_NO_CACHE_DIR=1 \ PIP_DISABLE_PIP_VERSION_CHECK=1 \ - PYTHONPATH="/app/libs/python/core:/app/libs/python/computer:/app/libs/python/agent:/app/libs/python/som:/app/libs/python/pylume:/app/libs/python/computer-server:/app/libs/python/mcp-server" + 
PYTHONPATH="/app/libs/python/core:/app/libs/python/computer:/app/libs/python/agent:/app/libs/python/som:/app/libs/python/computer-server:/app/libs/python/mcp-server" # Install system dependencies for ARM architecture RUN apt-get update && apt-get install -y --no-install-recommends \ diff --git a/README.md b/README.md index 2a43f3b7..236f04d4 100644 --- a/README.md +++ b/README.md @@ -207,17 +207,17 @@ The following table shows which capabilities are supported by each model: | Model | Computer-Use | Grounding | Tools | VLM | | -------------------------------------------------------------------------------------------------------------------------------- | :----------: | :-------: | :---: | :-: | -| [Claude Sonnet/Haiku](https://docs.claude.com/en/docs/agents-and-tools/tool-use/computer-use-tool#how-to-implement-computer-use) | ✓ | ✓ | ✓ | ✓ | -| [OpenAI CU Preview](https://platform.openai.com/docs/models/computer-use-preview) | ✓ | ✓ | | ✓ | -| [GLM-V](https://huggingface.co/THUDM/glm-4v-9b) | ✓ | ✓ | ✓ | ✓ | -| [Gemini CU Preview](https://ai.google.dev/gemini-api/docs/computer-use) | ✓ | ✓ | | ✓ | -| [InternVL](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | ✓ | ✓ | ✓ | ✓ | -| [UI-TARS](https://huggingface.co/ByteDance-Seed/UI-TARS-1.5-7B) | ✓ | ✓ | ✓ | ✓ | -| [OpenCUA](https://huggingface.co/xlangai/OpenCUA-7B) | | ✓ | | | -| [GTA](https://huggingface.co/HelloKKMe/GTA1-7B) | | ✓ | | | -| [Holo](https://huggingface.co/Hcompany/Holo1.5-3B) | | ✓ | | | -| [Moondream](https://huggingface.co/moondream/moondream3-preview) | | ✓ | | | -| [OmniParser](https://github.com/microsoft/OmniParser) | | ✓ | | | +| [Claude Sonnet/Haiku](https://docs.claude.com/en/docs/agents-and-tools/tool-use/computer-use-tool#how-to-implement-computer-use) | 🖥️ | 🎯 | 🛠️ | 👁️ | +| [OpenAI CU Preview](https://platform.openai.com/docs/models/computer-use-preview) | 🖥️ | 🎯 | | 👁️ | +| [GLM-V](https://huggingface.co/THUDM/glm-4v-9b) | 🖥️ | 🎯 | 🛠️ | 👁️ | +| [Gemini CU 
Preview](https://ai.google.dev/gemini-api/docs/computer-use) | 🖥️ | 🎯 | | 👁️ | +| [InternVL](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | 🖥️ | 🎯 | 🛠️ | 👁️ | +| [UI-TARS](https://huggingface.co/ByteDance-Seed/UI-TARS-1.5-7B) | 🖥️ | 🎯 | 🛠️ | 👁️ | +| [OpenCUA](https://huggingface.co/xlangai/OpenCUA-7B) | | 🎯 | | | +| [GTA](https://huggingface.co/HelloKKMe/GTA1-7B) | | 🎯 | | | +| [Holo](https://huggingface.co/Hcompany/Holo1.5-3B) | | 🎯 | | | +| [Moondream](https://huggingface.co/moondream/moondream3-preview) | | 🎯 | | | +| [OmniParser](https://github.com/microsoft/OmniParser) | | 🎯 | | | ### Model IDs diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 00000000..75deb04c --- /dev/null +++ b/TESTING.md @@ -0,0 +1,106 @@ +# Testing Guide for CUA + +Quick guide to running tests and understanding the test architecture. + +## 🚀 Quick Start + +```bash +# Install dependencies +pip install pytest pytest-asyncio pytest-mock pytest-cov + +# Install package +cd libs/python/core +pip install -e . + +# Run tests +export CUA_TELEMETRY_DISABLED=1 # or $env:CUA_TELEMETRY_DISABLED="1" on Windows +pytest tests/ -v +``` + +## 🧪 Running Tests + +```bash +# All packages +pytest libs/python/*/tests/ -v + +# Specific package +cd libs/python/core && pytest tests/ -v + +# With coverage +pytest tests/ --cov --cov-report=html + +# Specific test +pytest tests/test_telemetry.py::TestTelemetryEnabled::test_telemetry_enabled_by_default -v +``` + +## 🏗️ Test Architecture + +**Principles**: SRP (Single Responsibility) + Vertical Slices + Testability + +``` +libs/python/ +├── core/tests/ # Tests ONLY core +├── agent/tests/ # Tests ONLY agent +└── computer/tests/ # Tests ONLY computer +``` + +Each test file = ONE feature. Each test class = ONE concern. + +## ➕ Adding New Tests + +1. Create `test_*.py` in the appropriate package's `tests/` directory +2. 
Follow the pattern: + +```python +"""Unit tests for my_feature.""" +import pytest +from unittest.mock import patch + +class TestMyFeature: + """Test MyFeature class.""" + + def test_initialization(self): + """Test that feature initializes.""" + from my_package import MyFeature + feature = MyFeature() + assert feature is not None +``` + +3. Mock external dependencies: + +```python +@pytest.fixture +def mock_api(): + with patch("my_package.api_client") as mock: + yield mock +``` + +## 🔄 CI/CD + +Tests run automatically on every PR via GitHub Actions (`.github/workflows/python-tests.yml`): + +- Matrix strategy: each package tested separately +- Python 3.12 +- ~2 minute runtime + +## 🐛 Troubleshooting + +**ModuleNotFoundError**: Run `pip install -e .` in package directory + +**Tests fail in CI but pass locally**: Set `CUA_TELEMETRY_DISABLED=1` + +**Async tests error**: Install `pytest-asyncio` and use `@pytest.mark.asyncio` + +**Mock not working**: Patch at usage location, not definition: + +```python +# ✅ Right +@patch("my_package.module.external_function") + +# ❌ Wrong +@patch("external_library.function") +``` + +--- + +**Questions?** Check existing tests for examples or open an issue. diff --git a/docs/content/docs/agent-sdk/agent-loops.mdx b/docs/content/docs/agent-sdk/agent-loops.mdx index 08dcf07b..625509b7 100644 --- a/docs/content/docs/agent-sdk/agent-loops.mdx +++ b/docs/content/docs/agent-sdk/agent-loops.mdx @@ -3,7 +3,13 @@ title: Agent Loops description: Supported computer-using agent loops and models --- -A corresponding Jupyter Notebook is available for this documentation. + + A corresponding{' '} + + Jupyter Notebook + {' '} + is available for this documentation. + An agent can be thought of as a loop - it generates actions, executes them, and repeats until done: @@ -102,7 +108,7 @@ messages = [ "content": "Take a screenshot and describe what you see" }, { - "role": "assistant", + "role": "assistant", "content": "I'll take a screenshot for you." 
} ] diff --git a/docs/content/docs/agent-sdk/benchmarks/index.mdx b/docs/content/docs/agent-sdk/benchmarks/index.mdx index 6397b2ec..685a8f92 100644 --- a/docs/content/docs/agent-sdk/benchmarks/index.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/index.mdx @@ -4,13 +4,14 @@ description: Computer Agent SDK benchmarks for agentic GUI tasks --- The benchmark system evaluates models on GUI grounding tasks, specifically agent loop success rate and click prediction accuracy. It supports both: + - **Computer Agent SDK providers** (using model strings like `"huggingface-local/HelloKKMe/GTA1-7B"`) - **Reference agent implementations** (custom model classes implementing the `ModelProtocol`) ## Available Benchmarks - **[ScreenSpot-v2](./benchmarks/screenspot-v2)** - Standard resolution GUI grounding -- **[ScreenSpot-Pro](./benchmarks/screenspot-pro)** - High-resolution GUI grounding +- **[ScreenSpot-Pro](./benchmarks/screenspot-pro)** - High-resolution GUI grounding - **[Interactive Testing](./benchmarks/interactive)** - Real-time testing and visualization ## Quick Start diff --git a/docs/content/docs/agent-sdk/benchmarks/introduction.mdx b/docs/content/docs/agent-sdk/benchmarks/introduction.mdx index 7f15b6a8..67a90769 100644 --- a/docs/content/docs/agent-sdk/benchmarks/introduction.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/introduction.mdx @@ -8,6 +8,7 @@ The Cua agent framework uses benchmarks to test the performance of supported mod ## Benchmark Types Computer-Agent benchmarks evaluate two key capabilities: + - **Plan Generation**: Breaking down complex tasks into a sequence of actions - **Coordinate Generation**: Predicting precise click locations on GUI elements @@ -31,7 +32,7 @@ agent.run("Open Firefox and go to github.com") ### Coordinate Generation Only -**[GUI Agent Grounding Leaderboard](https://gui-agent.github.io/grounding-leaderboard/)** - Benchmark for click prediction accuracy +**[GUI Agent Grounding 
Leaderboard](https://gui-agent.github.io/grounding-leaderboard/)** - Benchmark for click prediction accuracy This leaderboard tests models that specialize in finding exactly where to click on screen elements, but needs to be told what specific action to take. @@ -41,7 +42,7 @@ This leaderboard tests models that specialize in finding exactly where to click agent = ComputerAgent("huggingface-local/HelloKKMe/GTA1-7B", tools=[computer]) agent.predict_click("find the button to open the settings") # (27, 450) # This will raise an error: -# agent.run("Open Firefox and go to github.com") +# agent.run("Open Firefox and go to github.com") ``` ### Composed Agent diff --git a/docs/content/docs/agent-sdk/benchmarks/osworld-verified.mdx b/docs/content/docs/agent-sdk/benchmarks/osworld-verified.mdx index 1bfcfeea..26e4b7e4 100644 --- a/docs/content/docs/agent-sdk/benchmarks/osworld-verified.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/osworld-verified.mdx @@ -5,4 +5,4 @@ description: Benchmark ComputerAgent on OSWorld tasks using HUD OSWorld-Verified is a curated subset of OSWorld tasks that can be run using the HUD framework. -Use [ComputerAgent with HUD](../integrations/hud) to benchmark on these tasks. \ No newline at end of file +Use [ComputerAgent with HUD](../integrations/hud) to benchmark on these tasks. diff --git a/docs/content/docs/agent-sdk/benchmarks/screenspot-pro.mdx b/docs/content/docs/agent-sdk/benchmarks/screenspot-pro.mdx index 402b919e..15739d9d 100644 --- a/docs/content/docs/agent-sdk/benchmarks/screenspot-pro.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/screenspot-pro.mdx @@ -18,8 +18,8 @@ python ss-pro.py --samples 50 ## Results -| Model | Accuracy | Failure Rate | Samples | -|-------|----------|--------------|---------| -| Coming Soon | - | - | - | +| Model | Accuracy | Failure Rate | Samples | +| ----------- | -------- | ------------ | ------- | +| Coming Soon | - | - | - | Results will be populated after running benchmarks with various models. 
diff --git a/docs/content/docs/agent-sdk/benchmarks/screenspot-v2.mdx b/docs/content/docs/agent-sdk/benchmarks/screenspot-v2.mdx index 6cfcf1c1..ba78d5f9 100644 --- a/docs/content/docs/agent-sdk/benchmarks/screenspot-v2.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/screenspot-v2.mdx @@ -18,8 +18,8 @@ python ss-v2.py --samples 100 ## Results -| Model | Accuracy | Failure Rate | Samples | -|-------|----------|--------------|---------| -| Coming Soon | - | - | - | +| Model | Accuracy | Failure Rate | Samples | +| ----------- | -------- | ------------ | ------- | +| Coming Soon | - | - | - | Results will be populated after running benchmarks with various models. diff --git a/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx b/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx index 494c4a8f..1fb4afe7 100644 --- a/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx +++ b/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx @@ -10,30 +10,39 @@ Callbacks provide hooks into the agent lifecycle for extensibility. They're call ## Callback Lifecycle ### 1. `on_run_start(kwargs, old_items)` + Called once when agent run begins. Initialize tracking, logging, or state. ### 2. `on_run_continue(kwargs, old_items, new_items)` → bool + Called before each iteration. Return `False` to stop execution (e.g., budget limits). ### 3. `on_llm_start(messages)` → messages + Preprocess messages before LLM call. Use for PII anonymization, image retention. ### 4. `on_api_start(kwargs)` + Called before each LLM API call. ### 5. `on_api_end(kwargs, result)` + Called after each LLM API call completes. ### 6. `on_usage(usage)` + Called when usage information is received from LLM. ### 7. `on_llm_end(messages)` → messages + Postprocess messages after LLM call. Use for PII deanonymization. ### 8. `on_responses(kwargs, responses)` + Called when responses are received from agent loop. ### 9. 
Response-specific hooks: + - `on_text(item)` - Text messages - `on_computer_call_start(item)` - Before computer actions - `on_computer_call_end(item, result)` - After computer actions @@ -42,4 +51,5 @@ Called when responses are received from agent loop. - `on_screenshot(screenshot, name)` - When screenshots are taken ### 10. `on_run_end(kwargs, old_items, new_items)` -Called when agent run completes. Finalize tracking, save trajectories. \ No newline at end of file + +Called when agent run completes. Finalize tracking, save trajectories. diff --git a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx index 0787b1f6..4a76dc95 100644 --- a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx +++ b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx @@ -36,6 +36,7 @@ agent = ComputerAgent( ``` **Or with options:** + ```python # Advanced budget configuration agent = ComputerAgent( diff --git a/docs/content/docs/agent-sdk/callbacks/index.mdx b/docs/content/docs/agent-sdk/callbacks/index.mdx index 590c236a..71b63a2e 100644 --- a/docs/content/docs/agent-sdk/callbacks/index.mdx +++ b/docs/content/docs/agent-sdk/callbacks/index.mdx @@ -15,7 +15,7 @@ Built-in callbacks can be used as follows: ```python from agent.callbacks import ( ImageRetentionCallback, - TrajectorySaverCallback, + TrajectorySaverCallback, BudgetManagerCallback, LoggingCallback ) @@ -52,12 +52,12 @@ class CustomCallback(AsyncCallbackHandler): """Preprocess messages before LLM call""" # Add custom preprocessing logic return messages - + async def on_llm_end(self, messages): """Postprocess messages after LLM call""" # Add custom postprocessing logic return messages - + async def on_usage(self, usage): """Track usage information""" print(f"Tokens used: {usage.total_tokens}") diff --git a/docs/content/docs/agent-sdk/callbacks/logging.mdx b/docs/content/docs/agent-sdk/callbacks/logging.mdx index 8ab9b2e6..2ed3dda8 100644 --- 
a/docs/content/docs/agent-sdk/callbacks/logging.mdx +++ b/docs/content/docs/agent-sdk/callbacks/logging.mdx @@ -18,7 +18,7 @@ agent = ComputerAgent( tools=[computer], callbacks=[ LoggingCallback( - logger=logging.getLogger("cua"), + logger=logging.getLogger("cua"), level=logging.INFO ) ] @@ -47,7 +47,7 @@ class CustomLogger(AsyncCallbackHandler): def __init__(self, logger_name="agent"): self.logger = logging.getLogger(logger_name) self.logger.setLevel(logging.INFO) - + # Add console handler handler = logging.StreamHandler() formatter = logging.Formatter( @@ -55,18 +55,18 @@ class CustomLogger(AsyncCallbackHandler): ) handler.setFormatter(formatter) self.logger.addHandler(handler) - + async def on_run_start(self, kwargs, old_items): self.logger.info(f"Agent run started with model: {kwargs.get('model')}") - + async def on_computer_call_start(self, item): action = item.get('action', {}) self.logger.info(f"Computer action: {action.get('type')}") - + async def on_usage(self, usage): cost = usage.get('response_cost', 0) self.logger.info(f"API call cost: ${cost:.4f}") - + async def on_run_end(self, kwargs, old_items, new_items): self.logger.info("Agent run completed") @@ -81,6 +81,7 @@ agent = ComputerAgent( ## Available Hooks Log any agent event using these callback methods: + - `on_run_start/end` - Run lifecycle - `on_computer_call_start/end` - Computer actions - `on_api_start/end` - LLM API calls diff --git a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx index 8118f217..b139d9a2 100644 --- a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx +++ b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx @@ -40,6 +40,7 @@ View trajectories in the browser at: **[trycua.com/trajectory-viewer](http://trycua.com/trajectory-viewer)** The viewer provides: + - Interactive conversation replay - Screenshot galleries - No data collection @@ -47,11 +48,13 @@ The viewer provides: ## Trajectory Structure 
Trajectories are saved with: + - Complete conversation history - Usage statistics and costs - Timestamps and metadata - Screenshots and computer actions Each trajectory contains: + - **metadata.json**: Run info, timestamps, usage stats (`total_tokens`, `response_cost`) - **turn_000/**: Turn-by-turn conversation history (api calls, responses, computer calls, screenshots) diff --git a/docs/content/docs/agent-sdk/custom-computer-handlers.mdx b/docs/content/docs/agent-sdk/custom-computer-handlers.mdx index e087fc21..c76a5d66 100644 --- a/docs/content/docs/agent-sdk/custom-computer-handlers.mdx +++ b/docs/content/docs/agent-sdk/custom-computer-handlers.mdx @@ -53,67 +53,67 @@ from typing import Literal, List, Dict, Union, Optional class MyCustomComputer(AsyncComputerHandler): """Custom computer handler implementation.""" - + def __init__(self): # Initialize your custom computer interface here pass - - # ==== Computer-Use-Preview Action Space ==== + + # ==== Computer-Use-Preview Action Space ==== async def get_environment(self) -> Literal["windows", "mac", "linux", "browser"]: """Get the current environment type.""" ... - + async def get_dimensions(self) -> tuple[int, int]: """Get screen dimensions as (width, height).""" ... - + async def screenshot(self) -> str: """Take a screenshot and return as base64 string.""" ... - + async def click(self, x: int, y: int, button: str = "left") -> None: """Click at coordinates with specified button.""" ... - + async def double_click(self, x: int, y: int) -> None: """Double click at coordinates.""" ... - + async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: """Scroll at coordinates with specified scroll amounts.""" ... - + async def type(self, text: str) -> None: """Type text.""" ... - + async def wait(self, ms: int = 1000) -> None: """Wait for specified milliseconds.""" ... - + async def move(self, x: int, y: int) -> None: """Move cursor to coordinates.""" ... 
- + async def keypress(self, keys: Union[List[str], str]) -> None: """Press key combination.""" ... - + async def drag(self, path: List[Dict[str, int]]) -> None: """Drag along specified path.""" ... - + async def get_current_url(self) -> str: """Get current URL (for browser environments).""" ... - - # ==== Anthropic Action Space ==== + + # ==== Anthropic Action Space ==== async def left_mouse_down(self, x: Optional[int] = None, y: Optional[int] = None) -> None: """Left mouse down at coordinates.""" ... - + async def left_mouse_up(self, x: Optional[int] = None, y: Optional[int] = None) -> None: """Left mouse up at coordinates.""" ... @@ -127,4 +127,4 @@ agent = ComputerAgent( ) await agent.run("Take a screenshot and click at coordinates 100, 200") -``` \ No newline at end of file +``` diff --git a/docs/content/docs/agent-sdk/customizing-computeragent.mdx b/docs/content/docs/agent-sdk/customizing-computeragent.mdx index dac0d35f..e7d3c030 100644 --- a/docs/content/docs/agent-sdk/customizing-computeragent.mdx +++ b/docs/content/docs/agent-sdk/customizing-computeragent.mdx @@ -2,7 +2,16 @@ title: Customizing Your ComputerAgent --- -A corresponding Jupyter Notebook is available for this documentation. + + A corresponding{' '} + + Jupyter Notebook + {' '} + is available for this documentation. + The `ComputerAgent` interface provides an easy proxy to any computer-using model configuration, and it is a powerful framework for extending and building your own agentic systems. 
@@ -118,4 +127,4 @@ await run_single_task( # tools=[your_custom_function], # callbacks=[YourCustomCallback()], ) -``` \ No newline at end of file +``` diff --git a/docs/content/docs/agent-sdk/integrations/hud.mdx b/docs/content/docs/agent-sdk/integrations/hud.mdx index f102e0a1..7bfcbdea 100644 --- a/docs/content/docs/agent-sdk/integrations/hud.mdx +++ b/docs/content/docs/agent-sdk/integrations/hud.mdx @@ -3,7 +3,13 @@ title: HUD Evals description: Use ComputerAgent with HUD for benchmarking and evaluation --- -A corresponding Jupyter Notebook is available for this documentation. + + A corresponding{' '} + + Jupyter Notebook + {' '} + is available for this documentation. + The HUD integration allows an agent to be benchmarked using the [HUD framework](https://www.hud.so/). Through the HUD integration, the agent controls a computer inside HUD, where tests are run to evaluate the success of each task. @@ -120,8 +126,8 @@ Both single-task and full-dataset runs share a common set of configuration optio HUD provides multiple benchmark datasets for realistic evaluation. 1. **[OSWorld-Verified](/agent-sdk/benchmarks/osworld-verified)** – Benchmark on 369+ real-world desktop tasks across Chrome, LibreOffice, GIMP, VS Code, etc. - *Best for*: evaluating full computer-use agents in realistic environments. - *Verified variant*: fixes 300+ issues from earlier versions for reliability. + _Best for_: evaluating full computer-use agents in realistic environments. + _Verified variant_: fixes 300+ issues from earlier versions for reliability. **Coming soon:** SheetBench (spreadsheet automation) and other specialized HUD datasets. @@ -129,7 +135,7 @@ See the [HUD docs](https://docs.hud.so/environment-creation) for more eval envir ## Tips -* **Debugging:** set `verbosity=2` to see every model call and tool action. -* **Performance:** lower `screenshot_delay` for faster runs; raise it if you see race conditions. 
-* **Safety:** always set `max_steps` (defaults to 50) to prevent runaway loops. -* **Custom tools:** pass extra `tools=[...]` into the agent config if you need beyond `openai_computer`. \ No newline at end of file +- **Debugging:** set `verbosity=2` to see every model call and tool action. +- **Performance:** lower `screenshot_delay` for faster runs; raise it if you see race conditions. +- **Safety:** always set `max_steps` (defaults to 50) to prevent runaway loops. +- **Custom tools:** pass extra `tools=[...]` into the agent config if you need beyond `openai_computer`. diff --git a/docs/content/docs/agent-sdk/migration-guide.mdx b/docs/content/docs/agent-sdk/migration-guide.mdx index 89ee706e..ec75ab7a 100644 --- a/docs/content/docs/agent-sdk/migration-guide.mdx +++ b/docs/content/docs/agent-sdk/migration-guide.mdx @@ -20,7 +20,9 @@ This guide lists **breaking changes** when migrating from the original `Computer ## Usage Examples: Old vs New ### 1. Anthropic Loop + **Old:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -31,7 +33,9 @@ async with Computer() as computer: async for result in agent.run("Take a screenshot"): print(result) ``` + **New:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -46,7 +50,9 @@ async with Computer() as computer: ``` ### 2. OpenAI Loop + **Old:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -57,7 +63,9 @@ async with Computer() as computer: async for result in agent.run("Take a screenshot"): print(result) ``` + **New:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -72,7 +80,9 @@ async with Computer() as computer: ``` ### 3. 
UI-TARS Loop + **Old:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -83,7 +93,9 @@ async with Computer() as computer: async for result in agent.run("Take a screenshot"): print(result) ``` + **New:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -98,7 +110,9 @@ async with Computer() as computer: ``` ### 4. Omni Loop + **Old:** + ```python async with Computer() as computer: agent = ComputerAgent( @@ -109,7 +123,9 @@ async with Computer() as computer: async for result in agent.run("Take a screenshot"): print(result) ``` + **New:** + ```python async with Computer() as computer: agent = ComputerAgent( diff --git a/docs/content/docs/agent-sdk/prompt-caching.mdx b/docs/content/docs/agent-sdk/prompt-caching.mdx index 721895c5..cdcf7db5 100644 --- a/docs/content/docs/agent-sdk/prompt-caching.mdx +++ b/docs/content/docs/agent-sdk/prompt-caching.mdx @@ -26,7 +26,7 @@ agent = ComputerAgent( When using Anthropic-based CUAs (Claude models), setting `use_prompt_caching=True` will automatically add `{ "cache_control": "ephemeral" }` to your messages. This enables prompt caching for the session and can speed up repeated runs with the same prompt. -This argument is only required for Anthropic CUAs. For other providers, it is ignored. + This argument is only required for Anthropic CUAs. For other providers, it is ignored. ## OpenAI Provider @@ -44,13 +44,16 @@ agent = ComputerAgent( ``` ## Implementation Details + - For Anthropic: Adds `{ "cache_control": "ephemeral" }` to messages when enabled. - For OpenAI: Caching is automatic for long prompts; the argument is ignored. ## When to Use + - Enable for Anthropic CUAs if you want to avoid reprocessing the same prompt in repeated or iterative tasks. - Not needed for OpenAI models unless you want explicit ephemeral cache control (not required for most users). 
## See Also + - [Agent Loops](./agent-loops) - [Migration Guide](./migration-guide) diff --git a/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx b/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx index 593ca84b..4e389365 100644 --- a/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx @@ -59,7 +59,7 @@ Combine state-of-the-art grounding with powerful reasoning: ```python agent = ComputerAgent( - "huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-3-5-sonnet-20241022", + "huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-3-5-sonnet-20241022", tools=[computer] ) diff --git a/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx b/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx index a3384b21..9621e520 100644 --- a/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx @@ -65,6 +65,7 @@ async for _ in agent.run("Click on the search bar and type 'hello world'"): ## InternVL 3.5 InternVL 3.5 family: + - `huggingface-local/OpenGVLab/InternVL3_5-{1B,2B,4B,8B,...}` ```python @@ -76,6 +77,7 @@ async for _ in agent.run("Open Firefox and navigate to github.com"): ## Qwen3 VL Qwen3 VL family: + - `openrouter/qwen/qwen3-vl-235b-a22b-instruct` ```python diff --git a/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx b/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx index 20e95ddb..1f12de9a 100644 --- a/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx @@ -17,9 +17,11 @@ All models that support `ComputerAgent.run()` also support `ComputerAgent.predic - Claude 3.5: `claude-3-5-sonnet-20241022` ### OpenAI CUA Preview + - Computer-use-preview: `computer-use-preview` ### UI-TARS 1.5 (Unified VLM with grounding support) + - 
`huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B` - `huggingface/ByteDance-Seed/UI-TARS-1.5-7B` (requires TGI endpoint) @@ -28,15 +30,19 @@ All models that support `ComputerAgent.run()` also support `ComputerAgent.predic These models are optimized specifically for click prediction and UI element grounding: ### OpenCUA + - `huggingface-local/xlangai/OpenCUA-{7B,32B}` ### GTA1 Family + - `huggingface-local/HelloKKMe/GTA1-{7B,32B,72B}` ### Holo 1.5 Family + - `huggingface-local/Hcompany/Holo1.5-{3B,7B,72B}` ### InternVL 3.5 Family + - `huggingface-local/OpenGVLab/InternVL3_5-{1B,2B,4B,8B,...}` ### OmniParser (OCR) diff --git a/docs/content/docs/agent-sdk/supported-model-providers/index.mdx b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx index 68e372b1..9177e712 100644 --- a/docs/content/docs/agent-sdk/supported-model-providers/index.mdx +++ b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx @@ -5,6 +5,7 @@ title: Supported Model Providers ## Supported Models ### Anthropic Claude (Computer Use API) + ```python model="anthropic/claude-3-5-sonnet-20241022" model="anthropic/claude-3-7-sonnet-20250219" @@ -13,20 +14,23 @@ model="anthropic/claude-sonnet-4-20250514" ``` ### OpenAI Computer Use Preview + ```python model="openai/computer-use-preview" ``` ### UI-TARS (Local or Huggingface Inference) + ```python model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B" model="ollama_chat/0000/ui-tars-1.5-7b" ``` ### Omniparser + Any LLM + ```python model="omniparser+ollama_chat/mistral-small3.2" model="omniparser+vertex_ai/gemini-pro" model="omniparser+anthropic/claude-3-5-sonnet-20241022" model="omniparser+openai/gpt-4o" -``` \ No newline at end of file +``` diff --git a/docs/content/docs/agent-sdk/usage-tracking.mdx b/docs/content/docs/agent-sdk/usage-tracking.mdx index 2709d738..425c694e 100644 --- a/docs/content/docs/agent-sdk/usage-tracking.mdx +++ b/docs/content/docs/agent-sdk/usage-tracking.mdx @@ -51,7 +51,7 @@ class 
UsageTrackerCallback(AsyncCallbackHandler): print("Usage update:", usage) agent = ComputerAgent( - ..., + ..., callbacks=[UsageTrackerCallback()] ) ``` @@ -59,5 +59,6 @@ agent = ComputerAgent( See also: [Budget Manager Callbacks](./callbacks/cost-saving) ## See Also + - [Prompt Caching](./prompt-caching) - [Callbacks](./callbacks) diff --git a/docs/content/docs/computer-sdk/cloud-vm-management.mdx b/docs/content/docs/computer-sdk/cloud-vm-management.mdx index 2c8f09db..89af7639 100644 --- a/docs/content/docs/computer-sdk/cloud-vm-management.mdx +++ b/docs/content/docs/computer-sdk/cloud-vm-management.mdx @@ -5,7 +5,6 @@ description: Manage your Cua Cloud sandboxes (VMs) via Python SDK or HTTP API import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; - Using the Cua Cloud API, you can manage your Cua Cloud sandboxes (VMs) with Python or HTTP (curl). All examples require a CUA API key. You can obtain one from the [Dashboard](https://www.cua.ai/dashboard/keys). @@ -14,110 +13,116 @@ All examples require a CUA API key. 
You can obtain one from the [Dashboard](http ## List VMs - + - ```python - import os - import asyncio - from computer.providers.cloud.provider import CloudProvider +```python +import os +import asyncio +from computer.providers.cloud.provider import CloudProvider - async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" - # Optional: point to a different API base - # os.environ["CUA_API_BASE"] = "https://api.cua.ai" +async def main(): + api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # Optional: point to a different API base + # os.environ["CUA_API_BASE"] = "https://api.cua.ai" - provider = CloudProvider(api_key=api_key, verbose=False) - async with provider: - vms = await provider.list_vms() - for vm in vms: - print({ - "name": vm["name"], - "status": vm["status"], - "api_url": vm.get("api_url"), - "vnc_url": vm.get("vnc_url"), - }) + provider = CloudProvider(api_key=api_key, verbose=False) + async with provider: + vms = await provider.list_vms() + for vm in vms: + print({ + "name": vm["name"], + "status": vm["status"], + "api_url": vm.get("api_url"), + "vnc_url": vm.get("vnc_url"), + }) - if __name__ == "__main__": - asyncio.run(main()) - ``` +if __name__ == "__main__": + asyncio.run(main()) +``` - ```bash - curl -H "Authorization: Bearer $CUA_API_KEY" \ - "https://api.cua.ai/v1/vms" - ``` +```bash +curl -H "Authorization: Bearer $CUA_API_KEY" \ + "https://api.cua.ai/v1/vms" +``` - Responses: - - 200: Array of minimal VM objects with fields `{ name, password, status }` - - 401: Unauthorized (missing/invalid API key) +Responses: - ```json - [ - { - "name": "s-windows-x4snp46ebf", - "password": "49b8daa3", - "status": "running" - } - ] - ``` +- 200: Array of minimal VM objects with fields `{ name, password, status }` +- 401: Unauthorized (missing/invalid API key) - Status values: +```json +[ + { + "name": "s-windows-x4snp46ebf", + "password": "49b8daa3", + "status": "running" + } +] +``` - - `pending`: VM deployment in progress - - `running`: 
VM is active and accessible - - `stopped`: VM is stopped but not terminated - - `terminated`: VM has been permanently destroyed - - `failed`: VM deployment or operation failed +Status values: - - +- `pending`: VM deployment in progress +- `running`: VM is active and accessible +- `stopped`: VM is stopped but not terminated +- `terminated`: VM has been permanently destroyed +- `failed`: VM deployment or operation failed + +--- + + + + --- ## Start a VM + Provide the VM name you want to start. - ```python - import os - import asyncio - from computer.providers.cloud.provider import CloudProvider +```python +import os +import asyncio +from computer.providers.cloud.provider import CloudProvider - async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" - name = "my-vm-name" # e.g., "m-linux-96lcxd2c2k" +async def main(): + api_key = os.getenv("CUA_API_KEY") or "your-api-key" + name = "my-vm-name" # e.g., "m-linux-96lcxd2c2k" - provider = CloudProvider(api_key=api_key) - async with provider: - resp = await provider.run_vm(name) - print(resp) # { "name": name, "status": "starting" } + provider = CloudProvider(api_key=api_key) + async with provider: + resp = await provider.run_vm(name) + print(resp) # { "name": name, "status": "starting" } - if __name__ == "__main__": - asyncio.run(main()) - ``` +if __name__ == "__main__": + asyncio.run(main()) +``` - ```bash - curl -X POST \ - -H "Authorization: Bearer $CUA_API_KEY" \ - "https://api.cua.ai/v1/vms/my-vm-name/start" -i - ``` +```bash +curl -X POST \ + -H "Authorization: Bearer $CUA_API_KEY" \ + "https://api.cua.ai/v1/vms/my-vm-name/start" -i +``` - Responses: - - 204: No Content (start accepted) - - 401: Unauthorized (missing/invalid API key) - - 404: VM not found or not owned by the user +Responses: - ```text - HTTP/1.1 204 No Content - ``` +- 204: No Content (start accepted) +- 401: Unauthorized (missing/invalid API key) +- 404: VM not found or not owned by the user + +```text +HTTP/1.1 204 No Content +``` 
@@ -125,46 +130,48 @@ Provide the VM name you want to start. --- ## Stop a VM + Stops the VM asynchronously. - ```python - import os - import asyncio - from computer.providers.cloud.provider import CloudProvider +```python +import os +import asyncio +from computer.providers.cloud.provider import CloudProvider - async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" - name = "my-vm-name" +async def main(): + api_key = os.getenv("CUA_API_KEY") or "your-api-key" + name = "my-vm-name" - provider = CloudProvider(api_key=api_key) - async with provider: - resp = await provider.stop_vm(name) - print(resp) # { "name": name, "status": "stopping" } + provider = CloudProvider(api_key=api_key) + async with provider: + resp = await provider.stop_vm(name) + print(resp) # { "name": name, "status": "stopping" } - if __name__ == "__main__": - asyncio.run(main()) - ``` +if __name__ == "__main__": + asyncio.run(main()) +``` - ```bash - curl -X POST \ - -H "Authorization: Bearer $CUA_API_KEY" \ - "https://api.cua.ai/v1/vms/my-vm-name/stop" - ``` +```bash +curl -X POST \ + -H "Authorization: Bearer $CUA_API_KEY" \ + "https://api.cua.ai/v1/vms/my-vm-name/stop" +``` - Responses: - - 202: Accepted with `{ "status": "stopping" }` - - 401: Unauthorized (missing/invalid API key) - - 404: VM not found or not owned by the user +Responses: - ```json - { "status": "stopping" } - ``` +- 202: Accepted with `{ "status": "stopping" }` +- 401: Unauthorized (missing/invalid API key) +- 404: VM not found or not owned by the user + +```json +{ "status": "stopping" } +``` @@ -172,46 +179,48 @@ Stops the VM asynchronously. --- ## Restart a VM + Restarts the VM asynchronously. 
- ```python - import os - import asyncio - from computer.providers.cloud.provider import CloudProvider +```python +import os +import asyncio +from computer.providers.cloud.provider import CloudProvider - async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" - name = "my-vm-name" +async def main(): + api_key = os.getenv("CUA_API_KEY") or "your-api-key" + name = "my-vm-name" - provider = CloudProvider(api_key=api_key) - async with provider: - resp = await provider.restart_vm(name) - print(resp) # { "name": name, "status": "restarting" } + provider = CloudProvider(api_key=api_key) + async with provider: + resp = await provider.restart_vm(name) + print(resp) # { "name": name, "status": "restarting" } - if __name__ == "__main__": - asyncio.run(main()) - ``` +if __name__ == "__main__": + asyncio.run(main()) +``` - ```bash - curl -X POST \ - -H "Authorization: Bearer $CUA_API_KEY" \ - "https://api.cua.ai/v1/vms/my-vm-name/restart" - ``` +```bash +curl -X POST \ + -H "Authorization: Bearer $CUA_API_KEY" \ + "https://api.cua.ai/v1/vms/my-vm-name/restart" +``` - Responses: - - 202: Accepted with `{ "status": "restarting" }` - - 401: Unauthorized (missing/invalid API key) - - 404: VM not found or not owned by the user +Responses: - ```json - { "status": "restarting" } - ``` +- 202: Accepted with `{ "status": "restarting" }` +- 401: Unauthorized (missing/invalid API key) +- 404: VM not found or not owned by the user + +```json +{ "status": "restarting" } +``` @@ -219,42 +228,44 @@ Restarts the VM asynchronously. --- ## Query a VM by name + Query the computer-server running on the VM. Useful for checking details like status or OS type. 
- ```python - import os - import asyncio - from computer.providers.cloud.provider import CloudProvider +```python +import os +import asyncio +from computer.providers.cloud.provider import CloudProvider - async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" - name = "my-vm-name" +async def main(): + api_key = os.getenv("CUA_API_KEY") or "your-api-key" + name = "my-vm-name" - provider = CloudProvider(api_key=api_key) - async with provider: - info = await provider.get_vm(name) - print(info) + provider = CloudProvider(api_key=api_key) + async with provider: + info = await provider.get_vm(name) + print(info) - if __name__ == "__main__": - asyncio.run(main()) - ``` +if __name__ == "__main__": + asyncio.run(main()) +``` - ```bash - curl "https://my-vm-name.containers.cloud.cua.ai:8443/status" - ``` +```bash +curl "https://my-vm-name.containers.cloud.cua.ai:8443/status" +``` - Responses: - - 200: Server available +Responses: - ```json - { "status": "ok", "os_type": "linux", "features": ["agent"] } - ``` +- 200: Server available + +```json +{ "status": "ok", "os_type": "linux", "features": ["agent"] } +``` diff --git a/docs/content/docs/computer-sdk/commands.mdx b/docs/content/docs/computer-sdk/commands.mdx index d8e80493..c7b5a39b 100644 --- a/docs/content/docs/computer-sdk/commands.mdx +++ b/docs/content/docs/computer-sdk/commands.mdx @@ -13,16 +13,77 @@ Execute shell commands and get detailed results: + ```python - # Run shell command result = await - computer.interface.run_command(cmd) # result.stdout, result.stderr, result.returncode + # Run shell command + result = await computer.interface.run_command(cmd) # result.stdout, result.stderr, result.returncode ``` + + ```typescript - // Run shell command const result = await - computer.interface.runCommand(cmd); // result.stdout, result.stderr, result.returncode + // Run shell command + const result = await computer.interface.runCommand(cmd); // result.stdout, result.stderr, result.returncode ``` + + + 
+ +## Window Management + +Control application launching and windows: + + + + + ```python + # Launch applications + await computer.interface.launch("xfce4-terminal") + await computer.interface.launch("libreoffice --writer") + await computer.interface.open("https://www.google.com") + + # Window management + windows = await computer.interface.get_application_windows("xfce4-terminal") + window_id = windows[0] + await computer.interface.activate_window(window_id) + + window_id = await computer.interface.get_current_window_id() # get the current active window id + await computer.interface.window_size(window_id) + await computer.interface.get_window_title(window_id) + await computer.interface.get_window_position(window_id) + await computer.interface.set_window_size(window_id, 1200, 800) + await computer.interface.set_window_position(window_id, 100, 100) + await computer.interface.maximize_window(window_id) + await computer.interface.minimize_window(window_id) + await computer.interface.close_window(window_id) + ``` + + + + + ```typescript + // Launch applications + await computer.interface.launch("xfce4-terminal"); + await computer.interface.launch("libreoffice --writer"); + await computer.interface.open("https://www.google.com"); + + // Window management + const windows = await computer.interface.getApplicationWindows("xfce4-terminal"); + let windowId = windows[0]; + await computer.interface.activateWindow(windowId); + + windowId = await computer.interface.getCurrentWindowId(); // current active window id + await computer.interface.getWindowSize(windowId); + await computer.interface.getWindowName(windowId); + await computer.interface.getWindowPosition(windowId); + await computer.interface.setWindowSize(windowId, 1200, 800); + await computer.interface.setWindowPosition(windowId, 100, 100); + await computer.interface.maximizeWindow(windowId); + await computer.interface.minimizeWindow(windowId); + await computer.interface.closeWindow(windowId); + ``` + @@ -32,6 +93,7 @@ 
Precise mouse control and interaction: + ```python # Basic clicks await computer.interface.left_click(x, y) # Left click at coordinates @@ -50,6 +112,7 @@ Precise mouse control and interaction: + ```typescript // Basic clicks await computer.interface.leftClick(x, y); // Left click at coordinates @@ -75,6 +138,7 @@ Text input and key combinations: + ```python # Text input await computer.interface.type_text("Hello") # Type text @@ -88,6 +152,7 @@ Text input and key combinations: + ```typescript // Text input await computer.interface.typeText("Hello"); // Type text @@ -108,20 +173,24 @@ Mouse wheel and scrolling control: + ```python # Scrolling await computer.interface.scroll(x, y) # Scroll the mouse wheel - await computer.interface.scroll_down(clicks) # Scroll down await - computer.interface.scroll_up(clicks) # Scroll up + await computer.interface.scroll_down(clicks) # Scroll down + await computer.interface.scroll_up(clicks) # Scroll up ``` + - ```typescript - // Scrolling - await computer.interface.scroll(x, y); // Scroll the mouse wheel + + ```typescript + // Scrolling + await computer.interface.scroll(x, y); // Scroll the mouse wheel await computer.interface.scrollDown(clicks); // Scroll down - await computer.interface.scrollUp(clicks); // Scroll up + await computer.interface.scrollUp(clicks); // Scroll up ``` + @@ -131,21 +200,51 @@ Screen capture and display information: - ```python - # Screen operations - await computer.interface.screenshot() # Take a screenshot - await computer.interface.get_screen_size() # Get screen dimensions + ```python + # Screen operations + await computer.interface.screenshot() # Take a screenshot + await computer.interface.get_screen_size() # Get screen dimensions + ``` + + + + + ```typescript + // Screen operations + await computer.interface.screenshot(); // Take a screenshot + await computer.interface.getScreenSize(); // Get screen dimensions + ``` + + + + +## Desktop Actions + +Control desktop environment features like wallpaper: + 
+ + + ```python + # Get current desktop environment (e.g., 'xfce4', 'gnome', 'kde', 'mac', 'windows') + env = await computer.interface.get_desktop_environment() + print(env) # "xfce4" + + # Set desktop wallpaper to an image file accessible on the VM + await computer.interface.set_wallpaper("/home/cua/shared/wallpaper.png") ``` ```typescript - // Screen operations - await computer.interface.screenshot(); // Take a screenshot - await computer.interface.getScreenSize(); // Get screen dimensions - + // Get current desktop environment + const env = await computer.interface.getDesktopEnvironment(); + console.log(env); // "xfce4" + + // Set desktop wallpaper to an image file accessible on the VM + await computer.interface.setWallpaper('/home/cua/shared/wallpaper.png'); ``` + @@ -155,20 +254,20 @@ System clipboard management: - ```python - # Clipboard operations await - computer.interface.set_clipboard(text) # Set clipboard content await - computer.interface.copy_to_clipboard() # Get clipboard content + ```python + # Clipboard operations + await computer.interface.set_clipboard(text) # Set clipboard content + await computer.interface.copy_to_clipboard() # Get clipboard content ``` - ```typescript - // Clipboard operations + + ```typescript + // Clipboard operations await computer.interface.setClipboard(text); // Set clipboard content await computer.interface.copyToClipboard(); // Get clipboard content - ``` @@ -201,18 +300,19 @@ Direct file and directory manipulation: + ```typescript - # File existence checks + // File existence checks await computer.interface.fileExists(path); // Check if file exists await computer.interface.directoryExists(path); // Check if directory exists - # File content operations + // File content operations await computer.interface.readText(path, "utf-8"); // Read file content await computer.interface.writeText(path, content, "utf-8"); // Write file content await computer.interface.readBytes(path); // Read file content as bytes await 
computer.interface.writeBytes(path, content); // Write file content as bytes - # File and directory management + // File and directory management await computer.interface.deleteFile(path); // Delete file await computer.interface.createDir(path); // Create directory await computer.interface.deleteDir(path); // Delete directory @@ -228,20 +328,21 @@ Access system accessibility information: - ```python - # Get accessibility tree - await computer.interface.get_accessibility_tree() + ```python + # Get accessibility tree + await computer.interface.get_accessibility_tree() ``` - ```typescript - // Get accessibility tree - await computer.interface.getAccessibilityTree(); -``` - + ```typescript + // Get accessibility tree + await computer.interface.getAccessibilityTree(); + ``` + + ## Delay Configuration @@ -250,6 +351,7 @@ Control timing between actions: + ```python # Set default delay between all actions (in seconds) computer.interface.delay = 0.5 # 500ms delay between actions @@ -269,6 +371,7 @@ Manage Python environments: + ```python # Virtual environment management await computer.venv_install("demo_venv", ["requests", "macos-pyxa"]) # Install packages in a virtual environment @@ -277,4 +380,4 @@ Manage Python environments: ``` - \ No newline at end of file + diff --git a/docs/content/docs/computer-sdk/computer-ui.mdx b/docs/content/docs/computer-sdk/computer-ui.mdx index 22b131c0..c731e4c4 100644 --- a/docs/content/docs/computer-sdk/computer-ui.mdx +++ b/docs/content/docs/computer-sdk/computer-ui.mdx @@ -10,7 +10,8 @@ pip install "cua-computer[ui]" ``` -For precise control of the computer, we recommend using VNC or Screen Sharing instead of the Computer Gradio UI. + For precise control of the computer, we recommend using VNC or Screen Sharing instead of the + Computer Gradio UI. ### Building and Sharing Demonstrations with Huggingface @@ -43,8 +44,12 @@ For examples, see [Computer UI Examples](https://github.com/trycua/cua/tree/main #### 3. Record Your Tasks
-View demonstration video - + View demonstration video +
Record yourself performing various computer tasks using the UI. @@ -52,8 +57,12 @@ Record yourself performing various computer tasks using the UI. #### 4. Save Your Demonstrations
-View demonstration video - + View demonstration video +
Save each task by picking a descriptive name and adding relevant tags (e.g., "office", "web-browsing", "coding"). @@ -65,11 +74,16 @@ Repeat steps 3 and 4 until you have a good amount of demonstrations covering dif #### 6. Upload to Huggingface
-View demonstration video - + View demonstration video +
Upload your dataset to Huggingface by: + - Naming it as `{your_username}/{dataset_name}` - Choosing public or private visibility - Optionally selecting specific tags to upload only tasks with certain tags @@ -77,4 +91,4 @@ Upload your dataset to Huggingface by: #### Examples and Resources - Example Dataset: [ddupont/test-dataset](https://huggingface.co/datasets/ddupont/test-dataset) -- Find Community Datasets: 🔍 [Browse CUA Datasets on Huggingface](https://huggingface.co/datasets?other=cua) \ No newline at end of file +- Find Community Datasets: 🔍 [Browse CUA Datasets on Huggingface](https://huggingface.co/datasets?other=cua) diff --git a/docs/content/docs/computer-sdk/computers.mdx b/docs/content/docs/computer-sdk/computers.mdx index d666bd99..e7437959 100644 --- a/docs/content/docs/computer-sdk/computers.mdx +++ b/docs/content/docs/computer-sdk/computers.mdx @@ -3,7 +3,17 @@ title: Cua Computers description: Understanding Cua computer types and connection methods --- -A corresponding Jupyter Notebook and NodeJS project are available for this documentation. + + A corresponding{' '} + + Jupyter Notebook + {' '} + and{' '} + + NodeJS project + {' '} + are available for this documentation. + Before we can automate apps using AI, we need to first connect to a Computer Server to give the AI a safe environment to execute workflows in. 
diff --git a/docs/content/docs/computer-sdk/meta.json b/docs/content/docs/computer-sdk/meta.json index ab69fa17..0bdf7598 100644 --- a/docs/content/docs/computer-sdk/meta.json +++ b/docs/content/docs/computer-sdk/meta.json @@ -1,5 +1,11 @@ { - "title": "Computer SDK", - "description": "Build computer-using agents with the Computer SDK", - "pages": ["computers", "cloud-vm-management", "commands", "computer-ui", "sandboxed-python"] + "title": "Computer SDK", + "description": "Build computer-using agents with the Computer SDK", + "pages": [ + "computers", + "commands", + "computer-ui", + "tracing-api", + "sandboxed-python" + ] } diff --git a/docs/content/docs/computer-sdk/sandboxed-python.mdx b/docs/content/docs/computer-sdk/sandboxed-python.mdx index 6d70e9a6..82d2809b 100644 --- a/docs/content/docs/computer-sdk/sandboxed-python.mdx +++ b/docs/content/docs/computer-sdk/sandboxed-python.mdx @@ -3,7 +3,16 @@ title: Sandboxed Python slug: sandboxed-python --- -A corresponding Python example is available for this documentation. + + A corresponding{' '} + + Python example + {' '} + is available for this documentation. + You can run Python functions securely inside a sandboxed virtual environment on a remote Cua Computer. This is useful for executing untrusted user code, isolating dependencies, or providing a safe environment for automation tasks. diff --git a/docs/content/docs/computer-sdk/tracing-api.mdx b/docs/content/docs/computer-sdk/tracing-api.mdx new file mode 100644 index 00000000..29c1410b --- /dev/null +++ b/docs/content/docs/computer-sdk/tracing-api.mdx @@ -0,0 +1,349 @@ +--- +title: Computer Tracing API +description: Record computer interactions for debugging, training, and analysis +--- + +# Computer Tracing API + +The Computer tracing API provides a powerful way to record computer interactions for debugging, training, analysis, and compliance purposes. 
Inspired by Playwright's tracing functionality, it offers flexible recording options and standardized output formats. + + +The tracing API addresses GitHub issue #299 by providing a unified recording interface that works with any Computer usage pattern, not just ComputerAgent. + + +## Overview + +The tracing API allows you to: + +- Record screenshots at key moments +- Log all API calls and their results +- Capture accessibility tree snapshots +- Add custom metadata +- Export recordings in standardized formats +- Support for both automated and human-in-the-loop workflows + +## Basic Usage + +### Starting and Stopping Traces + +```python +from computer import Computer + +computer = Computer(os_type="macos") +await computer.run() + +# Start tracing with default options +await computer.tracing.start() + +# Perform some operations +await computer.interface.left_click(100, 200) +await computer.interface.type_text("Hello, World!") +await computer.interface.press_key("enter") + +# Stop tracing and save +trace_path = await computer.tracing.stop() +print(f"Trace saved to: {trace_path}") +``` + +### Custom Configuration + +```python +# Start tracing with custom configuration +await computer.tracing.start({ + 'video': False, # Record video frames + 'screenshots': True, # Record screenshots (default: True) + 'api_calls': True, # Record API calls (default: True) + 'accessibility_tree': True, # Record accessibility snapshots + 'metadata': True, # Allow custom metadata (default: True) + 'name': 'my_custom_trace', # Custom trace name + 'path': './my_traces' # Custom output directory +}) + +# Add custom metadata during tracing +await computer.tracing.add_metadata('user_id', 'user123') +await computer.tracing.add_metadata('test_case', 'login_flow') + +# Stop with custom options +trace_path = await computer.tracing.stop({ + 'path': './exports/trace.zip', + 'format': 'zip' # 'zip' or 'dir' +}) +``` + +## Configuration Options + +### Start Options + +| Option | Type | Default | 
Description | +|--------|------|---------|-------------| +| `video` | bool | `False` | Record video frames (future feature) | +| `screenshots` | bool | `True` | Capture screenshots after key actions | +| `api_calls` | bool | `True` | Log all interface method calls | +| `accessibility_tree` | bool | `False` | Record accessibility tree snapshots | +| `metadata` | bool | `True` | Enable custom metadata recording | +| `name` | str | auto-generated | Custom name for the trace | +| `path` | str | auto-generated | Custom directory for trace files | + +### Stop Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `path` | str | auto-generated | Custom output path for final trace | +| `format` | str | `'zip'` | Output format: `'zip'` or `'dir'` | + +## Use Cases + +### Custom Agent Development + +```python +from computer import Computer + +async def test_custom_agent(): + computer = Computer(os_type="linux") + await computer.run() + + # Start tracing for this test session + await computer.tracing.start({ + 'name': 'custom_agent_test', + 'screenshots': True, + 'accessibility_tree': True + }) + + # Your custom agent logic here + screenshot = await computer.interface.screenshot() + await computer.interface.left_click(500, 300) + await computer.interface.type_text("test input") + + # Add context about what the agent is doing + await computer.tracing.add_metadata('action', 'filling_form') + await computer.tracing.add_metadata('confidence', 0.95) + + # Save the trace + trace_path = await computer.tracing.stop() + return trace_path +``` + +### Training Data Collection + +```python +async def collect_training_data(): + computer = Computer(os_type="macos") + await computer.run() + + tasks = [ + "open_browser_and_search", + "create_document", + "send_email" + ] + + for task in tasks: + # Start a new trace for each task + await computer.tracing.start({ + 'name': f'training_{task}', + 'screenshots': True, + 'accessibility_tree': True, + 
'metadata': True + }) + + # Add task metadata + await computer.tracing.add_metadata('task_type', task) + await computer.tracing.add_metadata('difficulty', 'beginner') + + # Perform the task (automated or human-guided) + await perform_task(computer, task) + + # Save this training example + await computer.tracing.stop({ + 'path': f'./training_data/{task}.zip' + }) +``` + +### Human-in-the-Loop Recording + +```python +async def record_human_demonstration(): + computer = Computer(os_type="windows") + await computer.run() + + # Start recording human demonstration + await computer.tracing.start({ + 'name': 'human_demo_excel_workflow', + 'screenshots': True, + 'api_calls': True, # Will capture any programmatic actions + 'metadata': True + }) + + print("Trace recording started. Perform your demonstration...") + print("The system will record all computer interactions.") + + # Add metadata about the demonstration + await computer.tracing.add_metadata('demonstrator', 'expert_user') + await computer.tracing.add_metadata('workflow', 'excel_data_analysis') + + # Human performs actions manually or through other tools + # Tracing will still capture any programmatic interactions + + input("Press Enter when demonstration is complete...") + + # Stop and save the demonstration + trace_path = await computer.tracing.stop() + print(f"Human demonstration saved to: {trace_path}") +``` + +### RPA Debugging + +```python +async def debug_rpa_workflow(): + computer = Computer(os_type="linux") + await computer.run() + + # Start tracing with full debugging info + await computer.tracing.start({ + 'name': 'rpa_debug_session', + 'screenshots': True, + 'accessibility_tree': True, + 'api_calls': True + }) + + try: + # Your RPA workflow + await rpa_login_sequence(computer) + await rpa_data_entry(computer) + await rpa_generate_report(computer) + + await computer.tracing.add_metadata('status', 'success') + + except Exception as e: + # Record the error in the trace + await 
computer.tracing.add_metadata('error', str(e)) + await computer.tracing.add_metadata('status', 'failed') + raise + finally: + # Always save the debug trace + trace_path = await computer.tracing.stop() + print(f"Debug trace saved to: {trace_path}") +``` + +## Output Format + +### Directory Structure + +When using `format='dir'`, traces are saved with this structure: + +``` +trace_20240922_143052_abc123/ +├── trace_metadata.json # Overall trace information +├── event_000001_trace_start.json +├── event_000002_api_call.json +├── event_000003_api_call.json +├── 000001_initial_screenshot.png +├── 000002_after_left_click.png +├── 000003_after_type_text.png +└── event_000004_trace_end.json +``` + +### Metadata Format + +The `trace_metadata.json` contains: + +```json +{ + "trace_id": "trace_20240922_143052_abc123", + "config": { + "screenshots": true, + "api_calls": true, + "accessibility_tree": false, + "metadata": true + }, + "start_time": 1695392252.123, + "end_time": 1695392267.456, + "duration": 15.333, + "total_events": 12, + "screenshot_count": 5, + "events": [...] 
// All events in chronological order +} +``` + +### Event Format + +Individual events follow this structure: + +```json +{ + "type": "api_call", + "timestamp": 1695392255.789, + "relative_time": 3.666, + "data": { + "method": "left_click", + "args": {"x": 100, "y": 200, "delay": null}, + "result": null, + "error": null, + "screenshot": "000002_after_left_click.png", + "success": true + } +} +``` + +## Integration with ComputerAgent + +The tracing API works seamlessly with existing ComputerAgent workflows: + +```python +from agent import ComputerAgent +from computer import Computer + +# Create computer and start tracing +computer = Computer(os_type="macos") +await computer.run() + +await computer.tracing.start({ + 'name': 'agent_with_tracing', + 'screenshots': True, + 'metadata': True +}) + +# Create agent using the same computer +agent = ComputerAgent( + model="openai/computer-use-preview", + tools=[computer] +) + +# Agent operations will be automatically traced +async for _ in agent.run("open trycua.com and navigate to docs"): + pass + +# Save the combined trace +trace_path = await computer.tracing.stop() +``` + +## Privacy Considerations + +The tracing API is designed with privacy in mind: + +- Clipboard content is not recorded (only content length) +- Screenshots can be disabled +- Sensitive text input can be filtered +- Custom metadata allows you to control what information is recorded + +## Comparison with ComputerAgent Trajectories + +| Feature | ComputerAgent Trajectories | Computer.tracing | +|---------|---------------------------|------------------| +| **Scope** | ComputerAgent only | Any Computer usage | +| **Flexibility** | Fixed format | Configurable options | +| **Custom Agents** | Not supported | Fully supported | +| **Human-in-the-loop** | Limited | Full support | +| **Real-time Control** | No | Start/stop anytime | +| **Output Format** | Agent-specific | Standardized | +| **Accessibility Data** | No | Optional | + +## Best Practices + +1. 
**Start tracing early**: Begin recording before your main workflow to capture the complete session +2. **Use meaningful names**: Provide descriptive trace names for easier organization +3. **Add contextual metadata**: Include information about what you're testing or demonstrating +4. **Handle errors gracefully**: Always stop tracing in a finally block +5. **Choose appropriate options**: Only record what you need to minimize overhead +6. **Organize output**: Use custom paths to organize traces by project or use case + +The Computer tracing API provides a powerful foundation for recording, analyzing, and improving computer automation workflows across all use cases. \ No newline at end of file diff --git a/docs/content/docs/example-usecases/form-filling.mdx b/docs/content/docs/example-usecases/form-filling.mdx index e819502e..d9a61581 100644 --- a/docs/content/docs/example-usecases/form-filling.mdx +++ b/docs/content/docs/example-usecases/form-filling.mdx @@ -15,6 +15,7 @@ This preset usecase uses [Cua Computer](/computer-sdk/computers) to interact wit ## Quickstart Create a `requirements.txt` file with the following dependencies: + ```text cua-agent cua-computer @@ -34,7 +35,7 @@ ANTHROPIC_API_KEY=your-api-key CUA_API_KEY=sk_cua-api01... ``` -Select the environment you want to run the code in (*click on the underlined values in the code to edit them directly!*): +Select the environment you want to run the code in (_click on the underlined values in the code to edit them directly!_): @@ -58,23 +59,21 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - +logger = logging.getLogger(**name**) def handle_sigint(sig, frame): - print("\\n\\nExecution interrupted by user. Exiting gracefully...") - exit(0) - +print("\\n\\nExecution interrupted by user. 
Exiting gracefully...") +exit(0) async def fill_application(): - try: - async with Computer( - os_type="linux", - provider_type=VMProviderType.CLOUD, - name="`}{`", - api_key="`}{`", - verbosity=logging.INFO, - ) as computer: +try: +async with Computer( +os_type="linux", +provider_type=VMProviderType.CLOUD, +name="`}{`", +api_key="`}{`", +verbosity=logging.INFO, +) as computer: agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", @@ -124,10 +123,9 @@ async def fill_application(): traceback.print_exc() raise - def main(): - try: - load_dotenv() +try: +load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( @@ -149,9 +147,9 @@ def main(): logger.error(f"Error running automation: {e}") traceback.print_exc() +if **name** == "**main**": +main()`} -if __name__ == "__main__": - main()`} @@ -175,22 +173,20 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - +logger = logging.getLogger(**name**) def handle_sigint(sig, frame): - print("\\n\\nExecution interrupted by user. Exiting gracefully...") - exit(0) - +print("\\n\\nExecution interrupted by user. Exiting gracefully...") +exit(0) async def fill_application(): - try: - async with Computer( - os_type="macos", - provider_type=VMProviderType.LUME, - name="`}{`", - verbosity=logging.INFO, - ) as computer: +try: +async with Computer( +os_type="macos", +provider_type=VMProviderType.LUME, +name="`}{`", +verbosity=logging.INFO, +) as computer: agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", @@ -240,10 +236,9 @@ async def fill_application(): traceback.print_exc() raise - def main(): - try: - load_dotenv() +try: +load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( @@ -259,9 +254,9 @@ def main(): logger.error(f"Error running automation: {e}") traceback.print_exc() +if **name** == "**main**": +main()`} -if __name__ == "__main__": - main()`}
@@ -283,21 +278,19 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - +logger = logging.getLogger(**name**) def handle_sigint(sig, frame): - print("\\n\\nExecution interrupted by user. Exiting gracefully...") - exit(0) - +print("\\n\\nExecution interrupted by user. Exiting gracefully...") +exit(0) async def fill_application(): - try: - async with Computer( - os_type="windows", - provider_type=VMProviderType.WINDOWS_SANDBOX, - verbosity=logging.INFO, - ) as computer: +try: +async with Computer( +os_type="windows", +provider_type=VMProviderType.WINDOWS_SANDBOX, +verbosity=logging.INFO, +) as computer: agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", @@ -347,10 +340,9 @@ async def fill_application(): traceback.print_exc() raise - def main(): - try: - load_dotenv() +try: +load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( @@ -366,9 +358,9 @@ def main(): logger.error(f"Error running automation: {e}") traceback.print_exc() +if **name** == "**main**": +main()`} -if __name__ == "__main__": - main()`} @@ -392,22 +384,20 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - +logger = logging.getLogger(**name**) def handle_sigint(sig, frame): - print("\\n\\nExecution interrupted by user. Exiting gracefully...") - exit(0) - +print("\\n\\nExecution interrupted by user. 
Exiting gracefully...") +exit(0) async def fill_application(): - try: - async with Computer( - os_type="linux", - provider_type=VMProviderType.DOCKER, - name="`}{`", - verbosity=logging.INFO, - ) as computer: +try: +async with Computer( +os_type="linux", +provider_type=VMProviderType.DOCKER, +name="`}{`", +verbosity=logging.INFO, +) as computer: agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", @@ -457,10 +447,9 @@ async def fill_application(): traceback.print_exc() raise - def main(): - try: - load_dotenv() +try: +load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( @@ -476,9 +465,9 @@ def main(): logger.error(f"Error running automation: {e}") traceback.print_exc() +if **name** == "**main**": +main()`} -if __name__ == "__main__": - main()`} @@ -488,4 +477,4 @@ if __name__ == "__main__": - Learn more about [Cua computers](/computer-sdk/computers) and [computer commands](/computer-sdk/commands) - Read about [Agent loops](/agent-sdk/agent-loops), [tools](/agent-sdk/custom-tools), and [supported model providers](/agent-sdk/supported-model-providers/) -- Experiment with different [Models and Providers](/agent-sdk/supported-model-providers/) +- Experiment with different [Models and Providers](/agent-sdk/supported-model-providers/) diff --git a/docs/content/docs/libraries/computer-server/Commands.mdx b/docs/content/docs/libraries/computer-server/Commands.mdx index 269162a1..9c220826 100644 --- a/docs/content/docs/libraries/computer-server/Commands.mdx +++ b/docs/content/docs/libraries/computer-server/Commands.mdx @@ -7,42 +7,42 @@ description: List of all commands supported by the Computer Server API (WebSocke This page lists all supported commands for the Computer Server, available via both WebSocket and REST API endpoints. 
-| Command | Description | -|---------------------|--------------------------------------------| -| version | Get protocol and package version info | -| run_command | Run a shell command | -| screenshot | Capture a screenshot | -| get_screen_size | Get the screen size | -| get_cursor_position | Get the current mouse cursor position | -| mouse_down | Mouse button down | -| mouse_up | Mouse button up | -| left_click | Left mouse click | -| right_click | Right mouse click | -| double_click | Double mouse click | -| move_cursor | Move mouse cursor to coordinates | -| drag_to | Drag mouse to coordinates | -| drag | Drag mouse by offset | -| key_down | Keyboard key down | -| key_up | Keyboard key up | -| type_text | Type text | -| press_key | Press a single key | -| hotkey | Press a hotkey combination | -| scroll | Scroll the screen | -| scroll_down | Scroll down | -| scroll_up | Scroll up | -| copy_to_clipboard | Copy text to clipboard | -| set_clipboard | Set clipboard content | -| file_exists | Check if a file exists | -| directory_exists | Check if a directory exists | -| list_dir | List files/directories in a directory | -| read_text | Read text from a file | -| write_text | Write text to a file | -| read_bytes | Read bytes from a file | -| write_bytes | Write bytes to a file | -| get_file_size | Get file size | -| delete_file | Delete a file | -| create_dir | Create a directory | -| delete_dir | Delete a directory | -| get_accessibility_tree | Get accessibility tree (if supported) | -| find_element | Find element in accessibility tree | -| diorama_cmd | Run a diorama command (if supported) | +| Command | Description | +| ---------------------- | ------------------------------------- | +| version | Get protocol and package version info | +| run_command | Run a shell command | +| screenshot | Capture a screenshot | +| get_screen_size | Get the screen size | +| get_cursor_position | Get the current mouse cursor position | +| mouse_down | Mouse button down | +| 
mouse_up | Mouse button up | +| left_click | Left mouse click | +| right_click | Right mouse click | +| double_click | Double mouse click | +| move_cursor | Move mouse cursor to coordinates | +| drag_to | Drag mouse to coordinates | +| drag | Drag mouse by offset | +| key_down | Keyboard key down | +| key_up | Keyboard key up | +| type_text | Type text | +| press_key | Press a single key | +| hotkey | Press a hotkey combination | +| scroll | Scroll the screen | +| scroll_down | Scroll down | +| scroll_up | Scroll up | +| copy_to_clipboard | Copy text to clipboard | +| set_clipboard | Set clipboard content | +| file_exists | Check if a file exists | +| directory_exists | Check if a directory exists | +| list_dir | List files/directories in a directory | +| read_text | Read text from a file | +| write_text | Write text to a file | +| read_bytes | Read bytes from a file | +| write_bytes | Write bytes to a file | +| get_file_size | Get file size | +| delete_file | Delete a file | +| create_dir | Create a directory | +| delete_dir | Delete a directory | +| get_accessibility_tree | Get accessibility tree (if supported) | +| find_element | Find element in accessibility tree | +| diorama_cmd | Run a diorama command (if supported) | diff --git a/docs/content/docs/libraries/computer-server/REST-API.mdx b/docs/content/docs/libraries/computer-server/REST-API.mdx index 369565de..18f5980b 100644 --- a/docs/content/docs/libraries/computer-server/REST-API.mdx +++ b/docs/content/docs/libraries/computer-server/REST-API.mdx @@ -16,6 +16,7 @@ The Computer Server exposes a single REST endpoint for command execution: - Returns results as a streaming response (text/event-stream) ### Request Format + ```json { "command": "", @@ -24,10 +25,12 @@ The Computer Server exposes a single REST endpoint for command execution: ``` ### Required Headers (for cloud containers) + - `X-Container-Name`: Name of the container (cloud only) - `X-API-Key`: API key for authentication (cloud only) ### Example 
Request (Python) + ```python import requests @@ -38,6 +41,7 @@ print(resp.text) ``` ### Example Request (Cloud) + ```python import requests @@ -52,7 +56,9 @@ print(resp.text) ``` ### Response Format + Streaming text/event-stream with JSON objects, e.g.: + ``` data: {"success": true, "content": "..."} @@ -60,4 +66,5 @@ data: {"success": false, "error": "..."} ``` ### Supported Commands + See [Commands Reference](./Commands) for the full list of commands and parameters. diff --git a/docs/content/docs/libraries/computer-server/WebSocket-API.mdx b/docs/content/docs/libraries/computer-server/WebSocket-API.mdx index 98d6d7ad..00d20d21 100644 --- a/docs/content/docs/libraries/computer-server/WebSocket-API.mdx +++ b/docs/content/docs/libraries/computer-server/WebSocket-API.mdx @@ -11,7 +11,9 @@ The Computer Server exposes a WebSocket endpoint for real-time command execution - `wss://your-container.containers.cloud.trycua.com:8443/ws` (cloud) ### Authentication (Cloud Only) + For cloud containers, you must authenticate immediately after connecting: + ```json { "command": "authenticate", @@ -21,10 +23,13 @@ For cloud containers, you must authenticate immediately after connecting: } } ``` + If authentication fails, the connection is closed. ### Command Format + Send JSON messages: + ```json { "command": "", @@ -33,6 +38,7 @@ Send JSON messages: ``` ### Example (Python) + ```python import websockets import asyncio @@ -49,6 +55,7 @@ asyncio.run(main()) ``` ### Example (Cloud) + ```python import websockets import asyncio @@ -74,7 +81,9 @@ asyncio.run(main()) ``` ### Response Format + Each response is a JSON object: + ```json { "success": true, @@ -83,4 +92,5 @@ Each response is a JSON object: ``` ### Supported Commands + See [Commands Reference](./Commands) for the full list of commands and parameters. 
diff --git a/docs/content/docs/libraries/computer-server/index.mdx b/docs/content/docs/libraries/computer-server/index.mdx index fcf265da..d5affd25 100644 --- a/docs/content/docs/libraries/computer-server/index.mdx +++ b/docs/content/docs/libraries/computer-server/index.mdx @@ -6,7 +6,16 @@ github: - https://github.com/trycua/cua/tree/main/libs/python/computer-server --- -A corresponding Jupyter Notebook is available for this documentation. + + A corresponding{' '} + + Jupyter Notebook + {' '} + is available for this documentation. + The Computer Server API reference documentation is currently under development. diff --git a/docs/content/docs/libraries/computer/index.mdx b/docs/content/docs/libraries/computer/index.mdx index 6638f878..69478b20 100644 --- a/docs/content/docs/libraries/computer/index.mdx +++ b/docs/content/docs/libraries/computer/index.mdx @@ -20,4 +20,4 @@ See the [Commands](../computer-sdk/commands) documentation for all supported com ## Sandboxed Python Functions -See the [Sandboxed Python](../computer-sdk/sandboxed-python) documentation for running Python functions securely in isolated environments on a remote Cua Computer. \ No newline at end of file +See the [Sandboxed Python](../computer-sdk/sandboxed-python) documentation for running Python functions securely in isolated environments on a remote Cua Computer. diff --git a/docs/content/docs/libraries/lume/cli-reference.mdx b/docs/content/docs/libraries/lume/cli-reference.mdx index 5afcc7fe..20120616 100644 --- a/docs/content/docs/libraries/lume/cli-reference.mdx +++ b/docs/content/docs/libraries/lume/cli-reference.mdx @@ -18,7 +18,8 @@ lume run ubuntu-noble-vanilla:latest ``` -We provide [prebuilt VM images](../lume/prebuilt-images) in our [ghcr registry](https://github.com/orgs/trycua/packages). + We provide [prebuilt VM images](../lume/prebuilt-images) in our [ghcr + registry](https://github.com/orgs/trycua/packages). 
### Create a Custom VM @@ -37,10 +38,11 @@ The actual disk space used by sparse images will be much lower than the logical ## VM Management - lume create <name> +lume create <name> Create a new macOS or Linux virtual machine. **Options:** + - `--os ` - Operating system to install (macOS or linux, default: macOS) - `--cpu ` - Number of CPU cores (default: 4) - `--memory ` - Memory size, e.g., 8GB (default: 4GB) @@ -50,6 +52,7 @@ Create a new macOS or Linux virtual machine. - `--storage ` - VM storage location to use **Examples:** + ```bash # Create macOS VM with custom specs lume create my-mac --cpu 6 --memory 16GB --disk-size 100GB @@ -61,10 +64,11 @@ lume create my-ubuntu --os linux --cpu 2 --memory 8GB lume create my-sequoia --ipsw latest ``` - lume run <name> +lume run <name> Start and run a virtual machine. **Options:** + - `--no-display` - Do not start the VNC client app - `--shared-dir ` - Share directory with VM (format: path[:ro|rw]) - `--mount ` - For Linux VMs only, attach a read-only disk image @@ -75,6 +79,7 @@ Start and run a virtual machine. - `--storage ` - VM storage location to use **Examples:** + ```bash # Run VM with shared directory lume run my-vm --shared-dir /path/to/share:rw @@ -86,42 +91,52 @@ lume run my-vm --no-display lume run my-mac --recovery-mode true ``` - lume stop <name> +lume stop <name> Stop a running virtual machine. **Options:** + - `--storage ` - VM storage location to use ### lume delete <name> + Delete a virtual machine and its associated files. **Options:** + - `--force` - Force deletion without confirmation - `--storage ` - VM storage location to use ### lume clone <name> <new-name> + Create a copy of an existing virtual machine. **Options:** + - `--source-storage ` - Source VM storage location - `--dest-storage ` - Destination VM storage location ## VM Information and Configuration ### lume ls + List all virtual machines and their status. ### lume get <name> + Get detailed information about a specific virtual machine. 
**Options:** + - `-f, --format ` - Output format (json|text) - `--storage ` - VM storage location to use ### lume set <name> + Modify virtual machine configuration. **Options:** + - `--cpu ` - New number of CPU cores (e.g., 4) - `--memory ` - New memory size (e.g., 8192MB or 8GB) - `--disk-size ` - New disk size (e.g., 40960MB or 40GB) @@ -129,6 +144,7 @@ Modify virtual machine configuration. - `--storage ` - VM storage location to use **Examples:** + ```bash # Increase VM memory lume set my-vm --memory 16GB @@ -143,20 +159,25 @@ lume set my-vm --cpu 8 ## Image Management ### lume images + List available macOS images in local cache. ### lume pull <image> + Download a VM image from a container registry. **Options:** + - `--registry ` - Container registry URL (default: ghcr.io) - `--organization ` - Organization to pull from (default: trycua) - `--storage ` - VM storage location to use ### lume push <name> <image:tag> + Upload a VM image to a container registry. **Options:** + - `--additional-tags ` - Additional tags to push the same image to - `--registry ` - Container registry URL (default: ghcr.io) - `--organization ` - Organization/user to push to (default: trycua) @@ -167,38 +188,46 @@ Upload a VM image to a container registry. - `--reassemble` - Verify integrity by reassembling chunks (requires --dry-run) ### lume ipsw + Get the latest macOS restore image URL. ### lume prune + Remove cached images to free up disk space. ## Configuration ### lume config + Manage Lume configuration settings. 
**Subcommands:** ##### Storage Management + - `lume config storage add ` - Add a new VM storage location - `lume config storage remove ` - Remove a VM storage location - `lume config storage list` - List all VM storage locations - `lume config storage default ` - Set the default VM storage location ##### Cache Management + - `lume config cache get` - Get current cache directory - `lume config cache set ` - Set cache directory ##### Image Caching + - `lume config caching get` - Show current caching status - `lume config caching set ` - Enable or disable image caching ## API Server ### lume serve + Start the Lume API server for programmatic access. **Options:** + - `--port ` - Port to listen on (default: 7777) ## Global Options @@ -206,4 +235,4 @@ Start the Lume API server for programmatic access. These options are available for all commands: - `--help` - Show help information -- `--version` - Show version number \ No newline at end of file +- `--version` - Show version number diff --git a/docs/content/docs/libraries/lume/http-api.mdx b/docs/content/docs/libraries/lume/http-api.mdx index 04792f26..f908f85e 100644 --- a/docs/content/docs/libraries/lume/http-api.mdx +++ b/docs/content/docs/libraries/lume/http-api.mdx @@ -13,9 +13,8 @@ http://localhost:7777 ``` - The HTTP API service runs on port `7777` by default. If you'd like to use a - different port, pass the `--port` option during installation or when running - `lume serve`. + The HTTP API service runs on port `7777` by default. If you'd like to use a different port, pass + the `--port` option during installation or when running `lume serve`. ## Endpoints @@ -726,15 +725,15 @@ Push a VM to a registry as an image (asynchronous operation). 
#### Parameters -| Name | Type | Required | Description | -| ------------ | ------------ | -------- | ----------------------------------------------- | -| name | string | Yes | Local VM name to push | -| imageName | string | Yes | Image name in registry | -| tags | array | Yes | Image tags (e.g. `["latest", "v1"]`) | -| organization | string | Yes | Organization name | -| registry | string | No | Registry host (e.g. `ghcr.io`) | -| chunkSizeMb | integer | No | Chunk size in MB for upload | -| storage | string/null | No | Storage type (`ssd`, etc.) | +| Name | Type | Required | Description | +| ------------ | ----------- | -------- | ------------------------------------ | +| name | string | Yes | Local VM name to push | +| imageName | string | Yes | Image name in registry | +| tags | array | Yes | Image tags (e.g. `["latest", "v1"]`) | +| organization | string | Yes | Organization name | +| registry | string | No | Registry host (e.g. `ghcr.io`) | +| chunkSizeMb | integer | No | Chunk size in MB for upload | +| storage | string/null | No | Storage type (`ssd`, etc.) | #### Example Request @@ -747,13 +746,13 @@ curl --connect-timeout 6000 \ -X POST \ -H "Content-Type: application/json" \ -d '{ - "name": "my-local-vm", + "name": "my-local-vm", "imageName": "my-image", "tags": ["latest", "v1"], - "organization": "my-org", + "organization": "my-org", "registry": "ghcr.io", "chunkSizeMb": 512, - "storage": null + "storage": null }' \ http://localhost:7777/lume/vms/push ``` @@ -808,10 +807,7 @@ console.log(await res.json()); "message": "Push initiated in background", "name": "my-local-vm", "imageName": "my-image", - "tags": [ - "latest", - "v1" - ] + "tags": ["latest", "v1"] } ``` @@ -857,10 +853,7 @@ console.log(await res.json()); ```json { - "local": [ - "macos-sequoia-xcode:latest", - "macos-sequoia-vanilla:latest" - ] + "local": ["macos-sequoia-xcode:latest", "macos-sequoia-vanilla:latest"] } ``` @@ -1005,11 +998,11 @@ Update Lume configuration settings. 
#### Parameters -| Name | Type | Required | Description | -| --------------- | ------- | -------- | -------------------------------- | -| homeDirectory | string | No | Lume home directory path | -| cacheDirectory | string | No | Cache directory path | -| cachingEnabled | boolean | No | Enable or disable caching | +| Name | Type | Required | Description | +| -------------- | ------- | -------- | ------------------------- | +| homeDirectory | string | No | Lume home directory path | +| cacheDirectory | string | No | Cache directory path | +| cachingEnabled | boolean | No | Enable or disable caching | #### Example Request diff --git a/docs/content/docs/libraries/lume/index.mdx b/docs/content/docs/libraries/lume/index.mdx index d62c80e0..152d08c7 100644 --- a/docs/content/docs/libraries/lume/index.mdx +++ b/docs/content/docs/libraries/lume/index.mdx @@ -5,4 +5,4 @@ github: - https://github.com/trycua/cua/tree/main/libs/lume --- -Lume is a lightweight Command Line Interface and local API server for creating, running and managing **macOS and Linux virtual machines** with near-native performance on Apple Silicon, using Apple's [Virtualization.Framework](https://developer.apple.com/documentation/virtualization). \ No newline at end of file +Lume is a lightweight Command Line Interface and local API server for creating, running and managing **macOS and Linux virtual machines** with near-native performance on Apple Silicon, using Apple's [Virtualization.Framework](https://developer.apple.com/documentation/virtualization). diff --git a/docs/content/docs/libraries/lume/installation.mdx b/docs/content/docs/libraries/lume/installation.mdx index 161e48e0..7b990665 100644 --- a/docs/content/docs/libraries/lume/installation.mdx +++ b/docs/content/docs/libraries/lume/installation.mdx @@ -15,10 +15,12 @@ lume run macos-sequoia-vanilla:latest ``` -All prebuilt images use the default password `lume`. Change this immediately after your first login using the `passwd` command. 
+ All prebuilt images use the default password `lume`. Change this immediately after your first + login using the `passwd` command. **System Requirements**: + - Apple Silicon Mac (M1, M2, M3, etc.) - macOS 13.0 or later - At least 8GB of RAM (16GB recommended) @@ -33,6 +35,7 @@ Install with a single command: ``` ### Manual Start (No Background Service) + By default, Lume is installed as a background service that starts automatically on login. If you prefer to start the Lume API service manually when needed, you can use the `--no-background-service` option: ```bash @@ -40,8 +43,11 @@ By default, Lume is installed as a background service that starts automatically ``` -With this option, you'll need to manually start the Lume API service by running `lume serve` in your terminal whenever you need to use tools or libraries that rely on the Lume API (such as the Computer-Use Agent). + With this option, you'll need to manually start the Lume API service by running `lume serve` in + your terminal whenever you need to use tools or libraries that rely on the Lume API (such as the + Computer-Use Agent). ## Manual Download and Installation -You can also download the `lume.pkg.tar.gz` archive from the [latest release](https://github.com/trycua/cua/releases?q=lume&expanded=true), extract it, and install the package manually. \ No newline at end of file + +You can also download the `lume.pkg.tar.gz` archive from the [latest release](https://github.com/trycua/cua/releases?q=lume&expanded=true), extract it, and install the package manually. diff --git a/docs/content/docs/libraries/lume/prebuilt-images.mdx b/docs/content/docs/libraries/lume/prebuilt-images.mdx index 49628c59..4e4e3e67 100644 --- a/docs/content/docs/libraries/lume/prebuilt-images.mdx +++ b/docs/content/docs/libraries/lume/prebuilt-images.mdx @@ -5,24 +5,29 @@ title: Prebuilt Images Pre-built images are available in the registry [ghcr.io/trycua](https://github.com/orgs/trycua/packages). 
These images come with an SSH server pre-configured and auto-login enabled. -The default password on pre-built images is `lume`. For the security of your VM, change this password after your first login. + The default password on pre-built images is `lume`. For the security of your VM, change this + password after your first login. ## Available Images The following pre-built images are available to download via `lume pull`: -| Image | Tag | Description | Logical Size | -|-------|------------|-------------|------| -| `macos-sequoia-vanilla` | `latest`, `15.2` | macOS Sequoia 15.2 image | 20GB | -| `macos-sequoia-xcode` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 22GB | -| `macos-sequoia-cua` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 24GB | -| `ubuntu-noble-vanilla` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB | +| Image | Tag | Description | Logical Size | +| ----------------------- | ------------------- | ----------------------------------------------------------------------------------------------- | ------------ | +| `macos-sequoia-vanilla` | `latest`, `15.2` | macOS Sequoia 15.2 image | 20GB | +| `macos-sequoia-xcode` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 22GB | +| `macos-sequoia-cua` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 24GB | +| `ubuntu-noble-vanilla` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB | ## Disk Space For additional disk space, resize the VM disk after pulling the image using the `lume set --disk-size ` command. Note that the actual disk space used by sparse images will be much lower than the logical size listed. 
-**Important Note (v0.2.0+):** Images are being re-uploaded with sparse file system optimizations enabled, resulting in significantly lower actual disk usage. Older images (without the `-sparse` suffix) are now **deprecated**. The last version of `lume` fully supporting the non-sparse images was `v0.1.x`. Starting from `v0.2.0`, lume will automatically pull images optimized with sparse file system support. - \ No newline at end of file + **Important Note (v0.2.0+):** Images are being re-uploaded with sparse file system optimizations + enabled, resulting in significantly lower actual disk usage. Older images (without the `-sparse` + suffix) are now **deprecated**. The last version of `lume` fully supporting the non-sparse images + was `v0.1.x`. Starting from `v0.2.0`, lume will automatically pull images optimized with sparse + file system support. + diff --git a/docs/content/docs/libraries/lumier/building-lumier.mdx b/docs/content/docs/libraries/lumier/building-lumier.mdx index df8ad4f8..bd9b9951 100644 --- a/docs/content/docs/libraries/lumier/building-lumier.mdx +++ b/docs/content/docs/libraries/lumier/building-lumier.mdx @@ -39,4 +39,4 @@ docker build -t yourusername/lumier:custom . 
# Push to Docker Hub (after docker login) docker push yourusername/lumier:custom -``` \ No newline at end of file +``` diff --git a/docs/content/docs/libraries/lumier/docker-compose.mdx b/docs/content/docs/libraries/lumier/docker-compose.mdx index fece3473..312598e7 100644 --- a/docs/content/docs/libraries/lumier/docker-compose.mdx +++ b/docs/content/docs/libraries/lumier/docker-compose.mdx @@ -13,10 +13,10 @@ services: container_name: lumier-vm restart: unless-stopped ports: - - "8006:8006" # Port for VNC access + - '8006:8006' # Port for VNC access volumes: - - ./storage:/storage # VM persistent storage - - ./shared:/shared # Shared folder accessible in the VM + - ./storage:/storage # VM persistent storage + - ./shared:/shared # Shared folder accessible in the VM environment: - VM_NAME=lumier-vm - VERSION=ghcr.io/trycua/macos-sequoia-cua:latest diff --git a/docs/content/docs/libraries/lumier/docker.mdx b/docs/content/docs/libraries/lumier/docker.mdx index a14d0599..b7c72050 100644 --- a/docs/content/docs/libraries/lumier/docker.mdx +++ b/docs/content/docs/libraries/lumier/docker.mdx @@ -5,6 +5,7 @@ title: Docker You can use Lumier through Docker: ### Run a macOS VM (ephemeral) + ```bash # Run the container with temporary storage (using pre-built image from Docker Hub) docker run -it --rm \ @@ -16,12 +17,15 @@ docker run -it --rm \ -e RAM_SIZE=8192 \ trycua/lumier:latest ``` + Access the VM in your browser at **http://localhost:8006**. After running the command above, you can access your macOS VM through a web browser (e.g., http://localhost:8006). -With the basic setup above, your VM will be reset when you stop the container (ephemeral mode). This means any changes you make inside the macOS VM will be lost. See the section below for how to save your VM state. + With the basic setup above, your VM will be reset when you stop the container (ephemeral mode). + This means any changes you make inside the macOS VM will be lost. 
See the section below for how to + save your VM state. ## Saving Your VM State @@ -121,4 +125,4 @@ When running Lumier, you'll need to configure a few things: - `HOST_STORAGE_PATH`: Path to save VM state (when using persistent storage) - `HOST_SHARED_PATH`: Path to the shared folder (optional) -- **Background service**: The `lume serve` service should be running on your host (starts automatically when you install Lume using the `install.sh` script above). \ No newline at end of file +- **Background service**: The `lume serve` service should be running on your host (starts automatically when you install Lume using the `install.sh` script above). diff --git a/docs/content/docs/libraries/lumier/index.mdx b/docs/content/docs/libraries/lumier/index.mdx index 814055ba..d21768f4 100644 --- a/docs/content/docs/libraries/lumier/index.mdx +++ b/docs/content/docs/libraries/lumier/index.mdx @@ -15,7 +15,9 @@ github: ## How It Works -We're using Docker primarily as a convenient delivery mechanism, not as an isolation layer. Unlike traditional Docker containers, Lumier leverages the Apple Virtualization Framework (Apple Vz) through the `lume` CLI to create true virtual machines. + We're using Docker primarily as a convenient delivery mechanism, not as an isolation layer. Unlike + traditional Docker containers, Lumier leverages the Apple Virtualization Framework (Apple Vz) + through the `lume` CLI to create true virtual machines. Here's what's happening behind the scenes: @@ -23,4 +25,4 @@ Here's what's happening behind the scenes: 1. The Docker container provides a consistent environment to run the Lumier interface 2. Lumier connects to the Lume service running on your host Mac 3. Lume uses Apple's Virtualization Framework to create a true macOS virtual machine -4. The VM runs with hardware acceleration using your Mac's native virtualization capabilities \ No newline at end of file +4. 
The VM runs with hardware acceleration using your Mac's native virtualization capabilities diff --git a/docs/content/docs/libraries/lumier/installation.mdx b/docs/content/docs/libraries/lumier/installation.mdx index e0c20267..d2e62399 100644 --- a/docs/content/docs/libraries/lumier/installation.mdx +++ b/docs/content/docs/libraries/lumier/installation.mdx @@ -7,8 +7,9 @@ Before using Lumier, make sure you have: 1. **Docker for Apple Silicon** - download it [here](https://desktop.docker.com/mac/main/arm64/Docker.dmg) and follow the installation instructions. 2. **Lume** - This is the virtualization CLI that powers Lumier. Install it with this command: + ```bash /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" ``` -After installation, Lume runs as a background service and listens on port 7777. This service allows Lumier to create and manage virtual machines. If port 7777 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script. \ No newline at end of file +After installation, Lume runs as a background service and listens on port 7777. This service allows Lumier to create and manage virtual machines. If port 7777 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script. 
diff --git a/docs/content/docs/libraries/mcp-server/client-integrations.mdx b/docs/content/docs/libraries/mcp-server/client-integrations.mdx index 6a79f5b3..a95df6a9 100644 --- a/docs/content/docs/libraries/mcp-server/client-integrations.mdx +++ b/docs/content/docs/libraries/mcp-server/client-integrations.mdx @@ -115,4 +115,4 @@ All MCP clients can configure the server using environment variables: - `CUA_MAX_IMAGES` - Maximum images to keep in context - `CUA_USE_HOST_COMPUTER_SERVER` - Use host system instead of VM -See the [Configuration](/docs/libraries/mcp-server/configuration) page for detailed configuration options. \ No newline at end of file +See the [Configuration](/docs/libraries/mcp-server/configuration) page for detailed configuration options. diff --git a/docs/content/docs/libraries/mcp-server/index.mdx b/docs/content/docs/libraries/mcp-server/index.mdx index a20b5d09..7f2c7684 100644 --- a/docs/content/docs/libraries/mcp-server/index.mdx +++ b/docs/content/docs/libraries/mcp-server/index.mdx @@ -24,4 +24,4 @@ github: 2. **Configure**: Add to your MCP client configuration 3. **Use**: Ask Claude to perform computer tasks -See the [Installation](/docs/libraries/mcp-server/installation) guide for detailed setup instructions. \ No newline at end of file +See the [Installation](/docs/libraries/mcp-server/installation) guide for detailed setup instructions. 
diff --git a/docs/content/docs/libraries/mcp-server/installation.mdx b/docs/content/docs/libraries/mcp-server/installation.mdx index ce4f87a6..e3e11a6b 100644 --- a/docs/content/docs/libraries/mcp-server/installation.mdx +++ b/docs/content/docs/libraries/mcp-server/installation.mdx @@ -9,8 +9,9 @@ pip install cua-mcp-server ``` This will install: + - The MCP server -- CUA agent and computer dependencies +- CUA agent and computer dependencies - An executable `cua-mcp-server` script in your PATH ## Easy Setup Script @@ -22,6 +23,7 @@ curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/python/mcp-ser ``` This script will: + - Create the ~/.cua directory if it doesn't exist - Generate a startup script at ~/.cua/start_mcp_server.sh - Make the script executable @@ -30,7 +32,7 @@ This script will: You can then use the script in your MCP configuration like this: ```json -{ +{ "mcpServers": { "cua-agent": { "command": "/bin/bash", @@ -130,4 +132,4 @@ If you're working with the CUA source code directly (like in the CUA repository) **Viewing Logs:** ```bash tail -n 20 -f ~/Library/Logs/Claude/mcp*.log -``` \ No newline at end of file +``` diff --git a/docs/content/docs/libraries/mcp-server/llm-integrations.mdx b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx index a7515ae2..6dedd52d 100644 --- a/docs/content/docs/libraries/mcp-server/llm-integrations.mdx +++ b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx @@ -1,6 +1,7 @@ --- title: LLM Integrations --- + ## LiteLLM Integration This MCP server features comprehensive liteLLM integration, allowing you to use any supported LLM provider with a simple model string configuration. 
@@ -10,7 +11,8 @@ This MCP server features comprehensive liteLLM integration, allowing you to use - **Extensive Provider Support**: Works with Anthropic, OpenAI, local models, and any liteLLM-compatible provider ### Model String Examples: + - **Anthropic**: `"anthropic/claude-3-5-sonnet-20241022"` - **OpenAI**: `"openai/computer-use-preview"` - **UI-TARS**: `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"` -- **Omni + Any LiteLLM**: `"omniparser+litellm/gpt-4o"`, `"omniparser+litellm/claude-3-haiku"`, `"omniparser+ollama_chat/gemma3"` \ No newline at end of file +- **Omni + Any LiteLLM**: `"omniparser+litellm/gpt-4o"`, `"omniparser+litellm/claude-3-haiku"`, `"omniparser+ollama_chat/gemma3"` diff --git a/docs/content/docs/libraries/mcp-server/tools.mdx b/docs/content/docs/libraries/mcp-server/tools.mdx index 20e91311..14901057 100644 --- a/docs/content/docs/libraries/mcp-server/tools.mdx +++ b/docs/content/docs/libraries/mcp-server/tools.mdx @@ -60,4 +60,4 @@ The MCP server supports multi-client sessions with automatic resource management "Take a screenshot of the current screen" "Show me the session statistics" "Cleanup session abc123" -``` \ No newline at end of file +``` diff --git a/docs/content/docs/libraries/som/configuration.mdx b/docs/content/docs/libraries/som/configuration.mdx index e57cdf1c..b421fdae 100644 --- a/docs/content/docs/libraries/som/configuration.mdx +++ b/docs/content/docs/libraries/som/configuration.mdx @@ -5,18 +5,28 @@ title: Configuration ### Detection Parameters #### Box Threshold (0.3) + Controls the confidence threshold for accepting detections: -Illustration of confidence thresholds in object detection, with a high-confidence detection accepted and a low-confidence detection rejected. 
-- Higher values (0.3) yield more precise but fewer detections -- Lower values (0.01) catch more potential icons but increase false positives -- Default is 0.3 for optimal precision/recall balance + +Illustration of confidence thresholds in object detection, with a high-confidence detection accepted and a low-confidence detection rejected. +- Higher values (0.3) yield more precise but fewer detections - Lower values (0.01) catch more +potential icons but increase false positives - Default is 0.3 for optimal precision/recall balance #### IOU Threshold (0.1) + Controls how overlapping detections are merged: -Diagram showing Intersection over Union (IOU) with low overlap between two boxes kept separate and high overlap leading to merging. -- Lower values (0.1) more aggressively remove overlapping boxes -- Higher values (0.5) allow more overlapping detections -- Default is 0.1 to handle densely packed UI elements + +Diagram showing Intersection over Union (IOU) with low overlap between two boxes kept separate and high overlap leading to merging. 
+- Lower values (0.1) more aggressively remove overlapping boxes - Higher values (0.5) allow more +overlapping detections - Default is 0.1 to handle densely packed UI elements ### OCR Configuration @@ -37,6 +47,7 @@ Controls how overlapping detections are merged: ### Hardware Acceleration #### MPS (Metal Performance Shaders) + - Multi-scale detection (640px, 1280px, 1920px) - Test-time augmentation enabled - Half-precision (FP16) @@ -44,6 +55,7 @@ Controls how overlapping detections are merged: - Best for production use when available #### CPU + - Single-scale detection (1280px) - Full-precision (FP32) - Average detection time: ~1.3s @@ -63,4 +75,4 @@ examples/output/ │ └── screenshot_analyzed.png ├── screen_details.txt └── summary.json -``` \ No newline at end of file +``` diff --git a/docs/content/docs/libraries/som/index.mdx b/docs/content/docs/libraries/som/index.mdx index ceba6e62..3eef53f1 100644 --- a/docs/content/docs/libraries/som/index.mdx +++ b/docs/content/docs/libraries/som/index.mdx @@ -6,7 +6,13 @@ github: - https://github.com/trycua/cua/tree/main/libs/python/som --- -A corresponding Python example is available for this documentation. + + A corresponding{' '} + + Python example + {' '} + is available for this documentation. + ## Overview diff --git a/docs/content/docs/quickstart-devs.mdx b/docs/content/docs/quickstart-devs.mdx index 4bd5b9ab..9cda4a2f 100644 --- a/docs/content/docs/quickstart-devs.mdx +++ b/docs/content/docs/quickstart-devs.mdx @@ -35,7 +35,7 @@ You can run your Cua computer in the cloud (recommended for easiest setup), loca Lume containers are macOS virtual machines that run on a macOS host machine. - + 1. Install the Lume CLI: ```bash @@ -51,8 +51,8 @@ You can run your Cua computer in the cloud (recommended for easiest setup), loca - Windows Sandbox provides Windows virtual environments that run on a Windows host machine. - +Windows Sandbox provides Windows virtual environments that run on a Windows host machine. + 1. 
Enable [Windows Sandbox](https://learn.microsoft.com/en-us/windows/security/application-security/application-isolation/windows-sandbox/windows-sandbox-install) (requires Windows 10 Pro/Enterprise or Windows 11) 2. Install the `pywinsandbox` dependency: @@ -65,8 +65,8 @@ You can run your Cua computer in the cloud (recommended for easiest setup), loca - Docker provides a way to run Ubuntu containers on any host machine. - +Docker provides a way to run Ubuntu containers on any host machine. + 1. Install Docker Desktop or Docker Engine: 2. Pull the CUA Ubuntu sandbox: @@ -173,6 +173,7 @@ Connect to your Cua computer and perform basic interactions, such as taking scre finally: await computer.close() ``` + Install the Cua computer TypeScript SDK: @@ -260,6 +261,7 @@ Connect to your Cua computer and perform basic interactions, such as taking scre await computer.close(); } ``` + @@ -274,11 +276,13 @@ Learn more about computers in the [Cua computers documentation](/computer-sdk/co Utilize an Agent to automate complex tasks by providing it with a goal and allowing it to interact with the computer environment. 
Install the Cua agent Python SDK: + ```bash pip install "cua-agent[all]" ``` Then, use the `ComputerAgent` object: + ```python from agent import ComputerAgent diff --git a/docs/content/docs/telemetry.mdx b/docs/content/docs/telemetry.mdx index a62b4f5f..fb5437c1 100644 --- a/docs/content/docs/telemetry.mdx +++ b/docs/content/docs/telemetry.mdx @@ -24,6 +24,7 @@ Basic performance metrics and system information that help us understand usage p ### Opt-In Telemetry (Disabled by Default) **Conversation Trajectory Logging**: Full conversation history including: + - User messages and agent responses - Computer actions and their outputs - Reasoning traces from the agent @@ -123,21 +124,21 @@ Note that telemetry settings must be configured during initialization and cannot ### Computer SDK Events -| Event Name | Data Collected | Trigger Notes | -|------------|----------------|---------------| -| **computer_initialized** | • `os`: Operating system (e.g., 'windows', 'darwin', 'linux')
• `os_version`: OS version
• `python_version`: Python version | Triggered when a Computer instance is created | -| **module_init** | • `module`: "computer"
• `version`: Package version
• `python_version`: Full Python version string | Triggered once when the computer package is imported for the first time | +| Event Name | Data Collected | Trigger Notes | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| **computer_initialized** | • `os`: Operating system (e.g., 'windows', 'darwin', 'linux')
• `os_version`: OS version
• `python_version`: Python version | Triggered when a Computer instance is created | +| **module_init** | • `module`: "computer"
• `version`: Package version
• `python_version`: Full Python version string | Triggered once when the computer package is imported for the first time | ### Agent SDK Events -| Event Name | Data Collected | Trigger Notes | -|------------|----------------|---------------| -| **module_init** | • `module`: "agent"
• `version`: Package version
• `python_version`: Full Python version string | Triggered once when the agent package is imported for the first time | -| **agent_session_start** | • `session_id`: Unique UUID for this agent instance
• `agent_type`: Class name (e.g., "ComputerAgent")
• `model`: Model name (e.g., "claude-3-5-sonnet")
• `os`: Operating system
• `os_version`: OS version
• `python_version`: Python version | Triggered when TelemetryCallback is initialized (agent instantiation) | -| **agent_run_start** | • `session_id`: Agent session UUID
• `run_id`: Unique UUID for this run
• `start_time`: Unix timestamp
• `input_context_size`: Character count of input messages
• `num_existing_messages`: Count of existing messages
• `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the start of each agent.run() call | -| **agent_run_end** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `end_time`: Unix timestamp
• `duration_seconds`: Total run duration
• `num_steps`: Total steps taken in this run
• `total_usage`: Accumulated token usage and costs
• `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the end of each agent.run() call | -| **agent_step** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `step`: Step number (incremental)
• `timestamp`: Unix timestamp
• `duration_seconds`: Duration of previous step | Triggered on each agent response/step during a run | -| **agent_usage** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `step`: Current step number
• `prompt_tokens`: Tokens in prompt
• `completion_tokens`: Tokens in response
• `total_tokens`: Total tokens used
• `response_cost`: Cost of this API call | Triggered whenever usage information is received from LLM API | +| Event Name | Data Collected | Trigger Notes | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| **module_init** | • `module`: "agent"
• `version`: Package version
• `python_version`: Full Python version string | Triggered once when the agent package is imported for the first time | +| **agent_session_start** | • `session_id`: Unique UUID for this agent instance
• `agent_type`: Class name (e.g., "ComputerAgent")
• `model`: Model name (e.g., "claude-3-5-sonnet")
• `os`: Operating system
• `os_version`: OS version
• `python_version`: Python version | Triggered when TelemetryCallback is initialized (agent instantiation) | +| **agent_run_start** | • `session_id`: Agent session UUID
• `run_id`: Unique UUID for this run
• `start_time`: Unix timestamp
• `input_context_size`: Character count of input messages
• `num_existing_messages`: Count of existing messages
• `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the start of each agent.run() call | +| **agent_run_end** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `end_time`: Unix timestamp
• `duration_seconds`: Total run duration
• `num_steps`: Total steps taken in this run
• `total_usage`: Accumulated token usage and costs
• `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the end of each agent.run() call | +| **agent_step** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `step`: Step number (incremental)
• `timestamp`: Unix timestamp
• `duration_seconds`: Duration of previous step | Triggered on each agent response/step during a run | +| **agent_usage** | • `session_id`: Agent session UUID
• `run_id`: Run UUID
• `step`: Current step number
• `prompt_tokens`: Tokens in prompt
• `completion_tokens`: Tokens in response
• `total_tokens`: Total tokens used
• `response_cost`: Cost of this API call | Triggered whenever usage information is received from LLM API | ## Transparency diff --git a/docs/src/app/(home)/[[...slug]]/page.tsx b/docs/src/app/(home)/[[...slug]]/page.tsx index dc283204..8aa89620 100644 --- a/docs/src/app/(home)/[[...slug]]/page.tsx +++ b/docs/src/app/(home)/[[...slug]]/page.tsx @@ -273,15 +273,99 @@ export async function generateMetadata(props: { if (page.url.includes('api')) title = `${page.data.title} | Cua API Docs`; if (page.url.includes('guide')) title = ` Guide: ${page.data.title} | Cua Docs`; + // Canonical URL points to cua.ai to consolidate all SEO authority on main domain + const canonicalUrl = `https://cua.ai${page.url}`; + + // Extract keywords from the page for SEO + const keywords = [ + 'computer use agent', + 'computer use', + 'AI automation', + 'visual automation', + page.data.title, + ]; + + // Structured data for better Google indexing (TechArticle schema) + const structuredData = { + '@context': 'https://schema.org', + '@type': 'TechArticle', + headline: page.data.title, + description: page.data.description, + url: canonicalUrl, + publisher: { + '@type': 'Organization', + name: 'Cua', + url: 'https://cua.ai', + logo: { + '@type': 'ImageObject', + url: 'https://cua.ai/cua_logo_black.svg', + }, + }, + mainEntityOfPage: { + '@type': 'WebPage', + '@id': canonicalUrl, + }, + }; + + // Breadcrumb schema for better site structure understanding + const breadcrumbSchema = { + '@context': 'https://schema.org', + '@type': 'BreadcrumbList', + itemListElement: [ + { + '@type': 'ListItem', + position: 1, + name: 'Cua', + item: 'https://cua.ai', + }, + { + '@type': 'ListItem', + position: 2, + name: 'Documentation', + item: 'https://cua.ai/docs', + }, + { + '@type': 'ListItem', + position: 3, + name: page.data.title, + item: canonicalUrl, + }, + ], + }; + return { title, description: page.data.description, + keywords, + authors: [{ name: 'Cua', url: 'https://cua.ai' }], + robots: { + index: 
true, + follow: true, + googleBot: { + index: true, + follow: true, + 'max-image-preview': 'large', + 'max-snippet': -1, + }, + }, + alternates: { + canonical: canonicalUrl, + }, openGraph: { title, description: page.data.description, type: 'article', siteName: 'Cua Docs', - url: 'https://trycua.com/docs', + url: canonicalUrl, + }, + twitter: { + card: 'summary', + title, + description: page.data.description, + creator: '@trycua', + }, + other: { + 'script:ld+json': JSON.stringify([structuredData, breadcrumbSchema]), }, }; } diff --git a/docs/src/app/layout.config.tsx b/docs/src/app/layout.config.tsx index d43acae6..87c652f1 100644 --- a/docs/src/app/layout.config.tsx +++ b/docs/src/app/layout.config.tsx @@ -41,15 +41,15 @@ export const baseOptions: BaseLayoutProps = { githubUrl: 'https://github.com/trycua/cua', links: [ { - url: 'https://trycua.com', - text: 'Cua home', + url: 'https://cua.ai', + text: 'Cua Home', type: 'icon', icon: , - external: false, + external: true, }, { url: 'https://discord.com/invite/mVnXXpdE85', - text: 'Cua discord', + text: 'Discord', type: 'icon', icon: ( <> @@ -69,6 +69,7 @@ export const baseOptions: BaseLayoutProps = { /> ), + external: true, }, ], }; diff --git a/docs/src/app/sitemap.ts b/docs/src/app/sitemap.ts new file mode 100644 index 00000000..4aa0680f --- /dev/null +++ b/docs/src/app/sitemap.ts @@ -0,0 +1,32 @@ +import { MetadataRoute } from 'next'; +import { source } from '@/lib/source'; + +export default function sitemap(): MetadataRoute.Sitemap { + const baseUrl = 'https://cua.ai'; + + // Get all pages from fumadocs source + const pages = source.getPages(); + + // Map pages to sitemap entries with /docs prefix + const docPages = pages.map((page) => { + // Ensure URL starts with /docs + const url = page.url.startsWith('/docs') ? page.url : `/docs${page.url}`; + + return { + url: `${baseUrl}${url}`, + lastModified: new Date(), + changeFrequency: 'weekly' as const, + priority: url === '/docs' ? 
1.0 : 0.8, + }; + }); + + // Add main docs page if not included + const mainDocsPage = { + url: `${baseUrl}/docs`, + lastModified: new Date(), + changeFrequency: 'weekly' as const, + priority: 1.0, + }; + + return [mainDocsPage, ...docPages]; +} diff --git a/docs/src/components/footer.tsx b/docs/src/components/footer.tsx index 2aafa33b..b129eebe 100644 --- a/docs/src/components/footer.tsx +++ b/docs/src/components/footer.tsx @@ -1,15 +1,159 @@ export function Footer() { return ( -