diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index e628cf42..8c02c929 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -1,4 +1,4 @@ -name: Bump Version +name: Bump Version & Publish on: workflow_dispatch: @@ -30,6 +30,9 @@ permissions: jobs: bump-version: runs-on: ubuntu-latest + outputs: + agent_version: ${{ steps.agent_version.outputs.version }} + computer_version: ${{ steps.computer_version.outputs.version }} steps: - name: Set package directory id: package @@ -86,6 +89,46 @@ jobs: cd ${{ steps.package.outputs.directory }} bump2version ${{ inputs.bump_type }} + - name: Also bump cua-agent + if: ${{ inputs.service == 'cua-computer' }} + run: | + cd libs/python/agent + bump2version ${{ inputs.bump_type }} + + - name: Capture bumped agent version + if: ${{ inputs.service == 'cua-agent' || inputs.service == 'cua-computer' }} + id: agent_version + run: | + cd libs/python/agent + VERSION=$(python -c "import tomllib; from pathlib import Path; data = tomllib.loads(Path('pyproject.toml').read_text()); print(data['project']['version'])") + echo "Agent version: $VERSION" + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Capture bumped computer version + if: ${{ inputs.service == 'cua-computer' }} + id: computer_version + run: | + cd libs/python/computer + VERSION=$(python -c "import tomllib; from pathlib import Path; data = tomllib.loads(Path('pyproject.toml').read_text()); print(data['project']['version'])") + echo "Computer version: $VERSION" + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + - name: Push changes run: | git push origin main --follow-tags + + publish-computer: + needs: bump-version + if: ${{ inputs.service == 'cua-computer' }} + uses: ./.github/workflows/pypi-publish-computer.yml + with: + version: ${{ needs.bump-version.outputs.computer_version }} + secrets: inherit + + publish-agent: + needs: [bump-version, publish-computer] + if: ${{ always() && (inputs.service == 'cua-agent' || inputs.service == 'cua-computer') && needs.bump-version.result == 'success' && (inputs.service == 'cua-agent' || needs.publish-computer.result == 'success') }} + uses: ./.github/workflows/pypi-publish-agent.yml + with: + version: ${{ needs.bump-version.outputs.agent_version }} + secrets: inherit diff --git a/.github/workflows/ci-lume.yml b/.github/workflows/ci-lume.yml index d33191cc..abf678e0 100644 --- a/.github/workflows/ci-lume.yml +++ b/.github/workflows/ci-lume.yml @@ -3,7 +3,13 @@ on: push: branches: - "main" - pull_request: {} + paths: + - "libs/lume/**" + - ".github/workflows/ci-lume.yml" + pull_request: + paths: + - "libs/lume/**" + - ".github/workflows/ci-lume.yml" concurrency: group: lume-${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml new file mode 100644 index 00000000..91965c07 --- /dev/null +++ b/.github/workflows/link-check.yml @@ -0,0 +1,74 @@ +name: Link Checker + +on: + pull_request_target: + branches: [main, master] + push: + branches: + - main + workflow_dispatch: + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Run Lychee link checker + uses: lycheeverse/lychee-action@v2 + id: lychee + with: + # Check all markdown files + args: --verbose --no-progress --max-cache-age 1d --accept 200..=299,403 --exclude '^file://' --exclude 'localhost' --exclude '127\.0\.0\.1' '**/*.md' + # Output results to file for parsing + output: lychee-output.md + # Don't 
fail the build on broken links (warning mode) + fail: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Parse link check results + id: parse-results + if: always() + run: | + # Use lychee exit code: 0 = success, >0 = errors found + EXIT_CODE="${{ steps.lychee.outputs.exit_code }}" + + echo "Exit code: $EXIT_CODE" + + # Show summary if output file exists + if [ -f "lychee-output.md" ]; then + echo "=== Link Check Summary ===" + cat lychee-output.md + fi + + # Set status based on exit code + if [ "$EXIT_CODE" = "0" ]; then + echo "STATUS_ICON=βœ…" >> $GITHUB_ENV + echo "STATUS_TEXT=All links are working" >> $GITHUB_ENV + echo "COLOR=#36a64f" >> $GITHUB_ENV + elif [ "$EXIT_CODE" = "2" ]; then + echo "STATUS_ICON=❌" >> $GITHUB_ENV + echo "STATUS_TEXT=Link checker failed to run" >> $GITHUB_ENV + echo "COLOR=#dc3545" >> $GITHUB_ENV + else + echo "STATUS_ICON=⚠️" >> $GITHUB_ENV + echo "STATUS_TEXT=Found broken links" >> $GITHUB_ENV + echo "COLOR=#ffa500" >> $GITHUB_ENV + fi + + - name: Send results to Slack + if: always() && github.ref == 'refs/heads/main' + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: ${{ vars.SLACK_CHANNEL }} + SLACK_TITLE: "πŸ”— Link Check Results" + SLACK_COLOR: ${{ env.COLOR }} + SLACK_MESSAGE: | + *Status:* ${{ env.STATUS_ICON }} ${{ env.STATUS_TEXT }} + + *Branch:* `${{ github.ref_name }}` + + <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}${{ github.event.pull_request.number && format('?pr={0}', github.event.pull_request.number) || '' }}|View broken links> diff --git a/.github/workflows/npm-publish-cli.yml b/.github/workflows/npm-publish-cli.yml new file mode 100644 index 00000000..4bc2826b --- /dev/null +++ b/.github/workflows/npm-publish-cli.yml @@ -0,0 +1,212 @@ +name: Publish @trycua/cli + +on: + workflow_dispatch: + inputs: + version: + description: "Version to publish (default: from package.json)" + required: false + default: "" + +jobs: + build-and-publish: + permissions: + id-token: write + contents: write + packages: write + + strategy: + matrix: + include: + - target: bun-linux-x64 + ext: "" + binary_name: cua-linux-x64 + - target: bun-darwin-x64 + ext: "" + binary_name: cua-darwin-x64 + - target: bun-darwin-arm64 + ext: "" + binary_name: cua-darwin-arm64 + - target: bun-windows-x64 + ext: ".exe" + binary_name: cua-windows-x64 + + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: latest + + - name: Get version + id: version + run: | + if [ -n "${{ github.event.inputs.version }}" ]; then + echo "version=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT + else + VERSION=$(bun -p "require('./libs/typescript/cua-cli/package.json').version") + echo "version=${VERSION}" >> $GITHUB_OUTPUT + fi + + - name: Install dependencies + working-directory: ./libs/typescript/cua-cli + run: bun install --frozen-lockfile + + - name: Build binary + working-directory: ./libs/typescript/cua-cli + run: | + bun build --compile --minify --sourcemap --target=${{ matrix.target }} index.ts --outfile ${{ matrix.binary_name }}${{ matrix.ext }} + mkdir -p ../../../dist + mv ${{ matrix.binary_name }}${{ matrix.ext }}* ../../../dist/ + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: cua-binary-${{ matrix.target }} + path: dist/ + if-no-files-found: error + retention-days: 1 + + publish-npm: + needs: 
build-and-publish + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/cua-v') + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: latest + + - name: Install dependencies + working-directory: ./libs/typescript/cua-cli + run: bun install --frozen-lockfile + + - name: Publish to npm + working-directory: ./libs/typescript/cua-cli + env: + NPM_CONFIG_TOKEN: ${{ secrets.NPM_TOKEN }} + run: bun publish --production --access public --tolerate-republish + + create-release: + needs: [build-and-publish, publish-npm] + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: latest + + - name: Get version + id: version + run: | + VERSION=$(bun -p "require('./libs/typescript/cua-cli/package.json').version") + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "tag=cua-v${VERSION}" >> $GITHUB_OUTPUT + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: dist + merge-multiple: true + + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.version.outputs.tag }} + release_name: cua-cli v${{ steps.version.outputs.version }} + body: | + # cua-cli v${{ steps.version.outputs.version }} + + ## Installation + + ### Using install script (recommended) + ```bash + # For Linux/macOS + curl -fsSL https://cua.ai/cli/install.sh | sh + + # For Windows (PowerShell) + irm https://cua.ai/cli/install.ps1 | iex + ``` + + ### Using npm/bun + ```bash + # Using bun + bun add -g @trycua/cli + + # Or using npm + npm install -g @trycua/cli + ``` + + ### From source + ```bash + git clone -b ${{ steps.version.outputs.tag }} https://github.com/trycua/cua.git + cd cua/libs/typescript/cua-cli + bun install + bun link + bun link cua-cli + ``` + + ## Release Assets + - `cua-darwin-arm64`: macOS (Apple Silicon) + - `cua-darwin-x64`: macOS (Intel) + - `cua-linux-x64`: Linux (x86_64) + - `cua-windows-x64.exe`: Windows (x86_64) + draft: false + prerelease: false + + - name: Upload Linux Binary + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./dist/cua-linux-x64 + asset_name: cua-linux-x64 + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload macOS Intel Binary + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./dist/cua-darwin-x64 + asset_name: cua-darwin-x64 + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload macOS Apple Silicon Binary + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./dist/cua-darwin-arm64 + asset_name: cua-darwin-arm64 + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload Windows Binary + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./dist/cua-windows-x64.exe + asset_name: cua-windows-x64.exe + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pypi-publish-agent.yml 
b/.github/workflows/pypi-publish-agent.yml index c36c1c1b..3ae5f189 100644 --- a/.github/workflows/pypi-publish-agent.yml +++ b/.github/workflows/pypi-publish-agent.yml @@ -31,26 +31,39 @@ jobs: core_version: ${{ steps.update-deps.outputs.core_version }} steps: - uses: actions/checkout@v4 + with: + ref: main + fetch-depth: 0 + + - name: Ensure latest main branch + run: | + git fetch origin main + git reset --hard origin/main + echo "Current HEAD commit:" + git log -1 --oneline - name: Determine version id: get-version run: | - if [ "${{ github.event_name }}" == "push" ]; then + # Check inputs.version first (works for workflow_call regardless of event_name) + if [ -n "${{ inputs.version }}" ]; then + VERSION=${{ inputs.version }} + elif [ "${{ github.event_name }}" == "push" ]; then # Extract version from tag (for package-specific tags) if [[ "${{ github.ref }}" =~ ^refs/tags/agent-v([0-9]+\.[0-9]+\.[0-9]+) ]]; then VERSION=${BASH_REMATCH[1]} else - echo "Invalid tag format for agent" + echo "ERROR: Invalid tag format for agent" exit 1 fi - elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then - # Use version from workflow dispatch + elif [ -n "${{ github.event.inputs.version }}" ]; then VERSION=${{ github.event.inputs.version }} else - # Use version from workflow_call - VERSION=${{ inputs.version }} + echo "ERROR: No version found (inputs.version, event.inputs.version, and tag all empty)" + exit 1 fi - echo "VERSION=$VERSION" + + echo "Agent version: $VERSION" echo "version=$VERSION" >> $GITHUB_OUTPUT - name: Set up Python diff --git a/.github/workflows/pypi-publish-computer.yml b/.github/workflows/pypi-publish-computer.yml index ea5d644b..c2dd4029 100644 --- a/.github/workflows/pypi-publish-computer.yml +++ b/.github/workflows/pypi-publish-computer.yml @@ -33,21 +33,39 @@ jobs: - name: Determine version id: get-version run: | - if [ "${{ github.event_name }}" == "push" ]; then + echo "=== Version Detection Debug ===" + echo "Event name: ${{ github.event_name }}" + echo "Workflow call version: ${{ inputs.version }}" + echo "Workflow dispatch version: ${{ github.event.inputs.version }}" + echo "GitHub ref: ${{ github.ref }}" + + # Check inputs.version first (works for workflow_call regardless of event_name) + if [ -n "${{ inputs.version }}" ]; then + # Version provided via workflow_call or workflow_dispatch with version input + VERSION=${{ inputs.version }} + echo "Using inputs.version: $VERSION" + elif [ "${{ github.event_name }}" == "push" ]; then # Extract version from tag (for package-specific tags) if [[ "${{ github.ref }}" =~ ^refs/tags/computer-v([0-9]+\.[0-9]+\.[0-9]+) ]]; then VERSION=${BASH_REMATCH[1]} + echo "Extracted from tag: $VERSION" else echo "Invalid tag format for computer" exit 1 fi - elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then - # Use version from workflow dispatch + elif [ -n "${{ github.event.inputs.version }}" ]; then + # Use version from workflow_dispatch event inputs VERSION=${{ github.event.inputs.version }} + echo "Using event.inputs.version: $VERSION" else - # Use version from workflow_call - VERSION=${{ inputs.version }} + echo "ERROR: No version found!" 
+ echo " - inputs.version is empty" + echo " - event.inputs.version is empty" + echo " - Not a tag push event" + exit 1 fi + + echo "=== Final Version ===" echo "VERSION=$VERSION" echo "version=$VERSION" >> $GITHUB_OUTPUT diff --git a/.github/workflows/pypi-reusable-publish.yml b/.github/workflows/pypi-reusable-publish.yml index 614d8a7d..c7fabf05 100644 --- a/.github/workflows/pypi-reusable-publish.yml +++ b/.github/workflows/pypi-reusable-publish.yml @@ -47,8 +47,16 @@ jobs: steps: - uses: actions/checkout@v4 with: + ref: main fetch-depth: 0 # Full history for release creation + - name: Ensure latest main branch + run: | + git fetch origin main + git reset --hard origin/main + echo "Current HEAD commit:" + git log -1 --oneline + - name: Set up Python uses: actions/setup-python@v4 with: @@ -78,7 +86,7 @@ jobs: # Verify version matches using script (exits with error if mismatch) python ${GITHUB_WORKSPACE}/.github/scripts/get_pyproject_version.py \ - ${{ inputs.package_dir }}/pyproject.toml \ + ${GITHUB_WORKSPACE}/${{ inputs.package_dir }}/pyproject.toml \ ${{ inputs.version }} - name: Initialize PDM in package directory diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 6bfbed5c..e8eb0c8b 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -49,8 +49,15 @@ jobs: # Install the package in editable mode with dev dependencies if [ -f pyproject.toml ]; then uv pip install --system -e . - # Install test dependencies - uv pip install --system pytest pytest-asyncio pytest-mock pytest-cov + fi + shell: bash + + - name: Install test dependencies + run: | + # Install test dependencies from root pyproject.toml if tests directory exists + # The root pyproject.toml has package=false, so we install just the dependency group + if [ -d "libs/python/${{ matrix.package }}/tests" ]; then + uv pip install --system --group test fi shell: bash diff --git a/.github/workflows/test-cua-models.yml b/.github/workflows/test-cua-models.yml index 43e7af38..1ae28eac 100644 --- a/.github/workflows/test-cua-models.yml +++ b/.github/workflows/test-cua-models.yml @@ -4,8 +4,6 @@ name: Test CUA Supporting Models # Run manually using workflow_dispatch with test_models=true on: - pull_request_target: - branches: [main, master] workflow_dispatch: inputs: test_models: @@ -20,7 +18,7 @@ on: jobs: # Test all CUA models - runs on PRs, schedules, or when manually triggered test-all-models: - if: ${{ github.event_name == 'pull_request_target' || github.event_name == 'schedule' || fromJSON(inputs.test_models || 'false') }} + if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || fromJSON(inputs.test_models || 'false') }} runs-on: ubuntu-latest strategy: fail-fast: false @@ -42,13 +40,13 @@ jobs: - gemini-2.5-computer-use-preview-10-2025 # InternVL - - huggingface-local/OpenGVLab/InternVL3_5-1B + # - huggingface-local/OpenGVLab/InternVL3_5-1B # - huggingface-local/OpenGVLab/InternVL3_5-2B # - huggingface-local/OpenGVLab/InternVL3_5-4B # - huggingface-local/OpenGVLab/InternVL3_5-8B # UI-TARS (supports full computer-use, can run standalone) - - huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B + # - huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B # Note: OpenCUA, GTA, and Holo are grounding-only models # They only support predict_click(), not agent.run() @@ -56,7 +54,7 @@ jobs: # Moondream (typically used in composed agents) # Format: moondream3+{any-llm-with-tools} - - moondream3+anthropic/claude-sonnet-4-5-20250929 # Claude has 
VLM + Tools + # - moondream3+anthropic/claude-sonnet-4-5-20250929 # Claude has VLM + Tools # - moondream3+openai/gpt-4o # GPT-4o has VLM + Tools # OmniParser (typically used in composed agents) @@ -68,9 +66,9 @@ jobs: # Format: {grounding-model}+{any-vlm-with-tools} # These grounding-only models (OpenCUA, GTA, Holo) must be used in composed form # since they only support predict_click(), not full agent.run() - - huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929 - - huggingface-local/xlangai/OpenCUA-7B+anthropic/claude-sonnet-4-5-20250929 - - huggingface-local/Hcompany/Holo1.5-3B+anthropic/claude-sonnet-4-5-20250929 + # - huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929 + # - huggingface-local/xlangai/OpenCUA-7B+anthropic/claude-sonnet-4-5-20250929 + # - huggingface-local/Hcompany/Holo1.5-3B+anthropic/claude-sonnet-4-5-20250929 steps: - name: Checkout repository @@ -219,6 +217,7 @@ jobs: path: | tests/agent_loop_testing/test_images/ *.log + if-no-files-found: ignore retention-days: 7 - name: Upload test summary data @@ -228,6 +227,7 @@ jobs: # Unique, slash-free artifact name per matrix entry name: test-summary-${{ env.SAFE_MODEL_NAME }} path: test_summary/ + if-no-files-found: ignore retention-days: 1 - name: Set default Slack color @@ -248,7 +248,7 @@ jobs: # Summary job that aggregates all model test results test-summary: - if: ${{ always() && (github.event_name == 'pull_request_target' || github.event_name == 'schedule' || fromJSON(inputs.test_models || 'false')) }} + if: ${{ always() && (github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || fromJSON(inputs.test_models || 'false')) }} needs: test-all-models runs-on: ubuntu-latest steps: diff --git a/.prettierignore b/.prettierignore index 7a28e2a7..81d0f089 100644 --- a/.prettierignore +++ b/.prettierignore @@ -29,4 +29,7 @@ venv/ *.db *.sqlite pnpm-lock.yaml -uv.lock \ No newline at end of file +uv.lock + +# Docs with complex JSX formatting +docs/content/docs/get-started/quickstart.mdx \ No newline at end of file diff --git a/Development.md b/Development.md index 4b18fad7..3b99c565 100644 --- a/Development.md +++ b/Development.md @@ -376,6 +376,61 @@ All packages are managed through a single consolidated workflow: [Bump Version]( 5. Click "Run workflow" to start the version bump 6. The workflow will automatically commit changes and push to main +## Releasing a New CLI Version + +To release a new version of the CUA CLI, follow these steps: + +### 1. Update the Version + +1. Update the version in `libs/typescript/cua-cli/package.json` +2. Commit the version change with a message like "Bump version to x.y.z" +3. Push the changes to the main branch + +### 2. Trigger the Release Workflow + +1. Go to the GitHub Actions tab in the repository +2. Select the "Publish @trycua/cli" workflow +3. Click "Run workflow" +4. Optionally, specify a version (e.g., "1.2.3") or leave empty to use the version from package.json +5. Click "Run workflow" + +The workflow will: + +- Build single-file executables for all supported platforms +- Publish the package to npm +- Create a GitHub release with the version tag (format: `cua-vX.Y.Z`) +- Attach all platform-specific binaries to the release + +### 3. Verify the Release + +1. Check the GitHub Releases page to ensure the new version is published +2. Verify the npm package was published to the registry +3. 
Test installation on different platforms: + + ```bash + # Test Linux/macOS installation + curl -fsSL https://cua.ai/install.sh | sh + + # Test Windows installation (PowerShell) + irm https://cua.ai/install.ps1 | iex + ``` + +### 4. Update Documentation + +Update any relevant documentation with the new version number, including: + +- Example code in documentation +- Any version-specific instructions +- Compatibility matrices + +### 5. Announce the Release + +- Create a new GitHub release with release notes +- Update the changelog if maintained separately +- Announce in relevant channels (Slack, Discord, etc.) + +--- + ### Rolling Back a Version Bump If you need to revert a version bump, follow these steps: diff --git a/README.md b/README.md index 52ca6fa1..395519f1 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,17 @@ [![Python](https://img.shields.io/badge/Python-333333?logo=python&logoColor=white&labelColor=333333)](#) -[![Swift](https://img.shields.io/badge/Swift-F05138?logo=swift&logoColor=white)](#) -[![macOS](https://img.shields.io/badge/macOS-000000?logo=apple&logoColor=F0F0F0)](#) [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?&logo=discord&logoColor=white)](https://discord.com/invite/mVnXXpdE85)
-**Cua** ("koo-ah") is Docker for [Computer-Use Agents](https://www.oneusefulthing.org/p/when-you-give-a-claude-a-mouse) - it enables AI agents to control full operating systems in virtual containers and deploy them locally or to the cloud. +**Cua** ("koo-ah") is an open-source framework for Computer-Use Agents - enabling AI systems to autonomously operate computers through visual understanding and action execution. Cua is used for research, evaluation, and production deployment of desktop, browser, and mobile automation agents. + +## What are Computer-Use Agents? + +Computer-Use Agents (CUAs) are AI systems that can autonomously interact with computer interfaces much like a human user would. Unlike traditional automation tools that rely on brittle selectors or APIs, CUAs use vision-language models to perceive screen content and reason about interface interactions - enabling them to adapt to UI changes and handle complex, multi-step workflows across applications.
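In code, that loop looks roughly like the sketch below - a non-authoritative example that mirrors the `Computer` and `ComputerAgent` usage shown in the Quick Start and blog posts referenced later in this README; the sandbox name, API key, and task string are placeholders.

```python
import asyncio

from agent import ComputerAgent
from computer import Computer


async def main():
    # Connect to a Cua sandbox ("my-sandbox" and the API key are placeholders)
    computer = Computer(
        os_type="windows",
        provider_type="cloud",
        name="my-sandbox",
        api_key="your-api-key",
    )
    await computer.run()
    try:
        # Any model string from the Model Zoo below works here
        agent = ComputerAgent(
            model="anthropic/claude-sonnet-4-5-20250929",
            tools=[computer],
            max_trajectory_budget=5.0,
        )
        # The agent looks at screenshots, reasons about the UI, and issues
        # clicks and keystrokes until the task is done
        await agent.run("Open the browser and search for Computer-Use Agents")
    finally:
        await computer.close()


asyncio.run(main())
```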
@@ -22,14 +24,14 @@ With the [Computer SDK](#computer-sdk), you can: -- automate Windows, Linux, and macOS VMs with a consistent, [pyautogui-like API](https://cua.ai/docs/docs/libraries/computer#interface-actions) -- create & manage VMs [locally](https://cua.ai/docs/docs/computer-sdk/computers#cua-local-containers) or using [Cua cloud](https://www.cua.ai/) +- automate Windows, Linux, and macOS VMs with a consistent, [pyautogui-like API](https://cua.ai/docs/computer-sdk/commands) +- create & manage VMs [locally](https://cua.ai/docs/quickstart-devs#using-computer) or using [Cua cloud](https://www.cua.ai/) With the [Agent SDK](#agent-sdk), you can: -- run computer-use models with a [consistent schema](https://cua.ai/docs/docs/agent-sdk/message-format) -- benchmark on OSWorld-Verified, SheetBench-V2, and more [with a single line of code using HUD](https://cua.ai/docs/docs/agent-sdk/integrations/hud) ([Notebook](https://github.com/trycua/cua/blob/main/notebooks/eval_osworld.ipynb)) -- combine UI grounding models with any LLM using [composed agents](https://cua.ai/docs/docs/agent-sdk/supported-agents/composed-agents) +- run computer-use models with a [consistent schema](https://cua.ai/docs/agent-sdk/message-format) +- benchmark on OSWorld-Verified (369 tasks), SheetBench-V2, and ScreenSpot [with a single line of code using HUD](https://cua.ai/docs/agent-sdk/integrations/hud) - see [benchmark results](#research--benchmarks) ([Notebook](https://github.com/trycua/cua/blob/main/notebooks/eval_osworld.ipynb)) +- combine UI grounding models with any LLM using [composed agents](https://cua.ai/docs/agent-sdk/supported-agents/composed-agents) - use new UI agent models and UI grounding models from the Model Zoo below with just a model string (e.g., `ComputerAgent(model="openai/computer-use-preview")`) - use API or local inference by changing a prefix (e.g., `openai/`, `openrouter/`, `ollama/`, `huggingface-local/`, `mlx/`, [etc.](https://docs.litellm.ai/docs/providers)) @@ -96,8 +98,8 @@ Core utilities for Cua # Quick Start - [Clone a starter template and run the code in <1 min](https://github.com/trycua/agent-template) -- [Get started with the Cua SDKs](https://cua.ai/docs/docs/quickstart-devs) -- [Get started with the Cua CLI](https://cua.ai/docs/docs/quickstart-cli) +- [Get started with the Cua SDKs](https://cua.ai/docs/quickstart-devs) +- [Get started with the Cua CLI](https://cua.ai/docs/quickstart-cli) # Agent SDK @@ -115,7 +117,7 @@ from agent import ComputerAgent # ComputerAgent works with any computer initialized with the Computer SDK agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], max_trajectory_budget=5.0 ) @@ -194,12 +196,12 @@ Cua uses the OpenAI Agent response format. 
These are the valid model configurations for `ComputerAgent(model="...")`: -| Configuration | Description | -| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | -| `{computer-use-model}` | A single model to perform all computer-use tasks | -| `{grounding-model}+{any-vlm-with-tools}` | [Composed](https://cua.ai/docs/docs/agent-sdk/supported-agents/composed-agents) with VLM for captioning and grounding LLM for element detection | -| `moondream3+{any-llm-with-tools}` | [Composed](https://cua.ai/docs/docs/agent-sdk/supported-agents/composed-agents) with Moondream3 for captioning and UI element detection | -| `human/human` | A [human-in-the-loop](https://cua.ai/docs/docs/agent-sdk/supported-agents/human-in-the-loop) in place of a model | +| Configuration | Description | +| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `{computer-use-model}` | A single model to perform all computer-use tasks | +| `{grounding-model}+{any-vlm-with-tools}` | [Composed](https://cua.ai/docs/agent-sdk/supported-agents/composed-agents) with VLM for captioning and grounding LLM for element detection | +| `moondream3+{any-llm-with-tools}` | [Composed](https://cua.ai/docs/agent-sdk/supported-agents/composed-agents) with Moondream3 for captioning and UI element detection | +| `human/human` | A [human-in-the-loop](https://cua.ai/docs/agent-sdk/supported-agents/human-in-the-loop) in place of a model | ### Model Capabilities @@ -209,16 +211,46 @@ The following table shows which capabilities are supported by each model: | -------------------------------------------------------------------------------------------------------------------------------- | :----------: | :-------: | :---: | :-: | | [Claude Sonnet/Haiku](https://docs.claude.com/en/docs/agents-and-tools/tool-use/computer-use-tool#how-to-implement-computer-use) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | | [OpenAI CU Preview](https://platform.openai.com/docs/models/computer-use-preview) | πŸ–₯️ | 🎯 | | πŸ‘οΈ | +| [Qwen3 VL](https://huggingface.co/collections/Qwen/qwen3-vl) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | | [GLM-V](https://huggingface.co/THUDM/glm-4v-9b) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | | [Gemini CU Preview](https://ai.google.dev/gemini-api/docs/computer-use) | πŸ–₯️ | 🎯 | | πŸ‘οΈ | | [InternVL](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | | [UI-TARS](https://huggingface.co/ByteDance-Seed/UI-TARS-1.5-7B) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | +| [UI-TARS-2](https://cua.ai/dashboard/vlm-router) | πŸ–₯️ | 🎯 | πŸ› οΈ | πŸ‘οΈ | | [OpenCUA](https://huggingface.co/xlangai/OpenCUA-7B) | | 🎯 | | | | [GTA](https://huggingface.co/HelloKKMe/GTA1-7B) | | 🎯 | | | | [Holo](https://huggingface.co/Hcompany/Holo1.5-3B) | | 🎯 | | | | [Moondream](https://huggingface.co/moondream/moondream3-preview) | | 🎯 | | | | [OmniParser](https://github.com/microsoft/OmniParser) | | 🎯 | | | +**Legend:** + +- πŸ–₯️ **Computer-Use**: Full agentic loop with planning and execution +- 🎯 **Grounding**: UI element detection and click coordinate prediction +- πŸ› οΈ **Tools**: Support for function calling beyond screen interaction +- πŸ‘οΈ **VLM**: Vision-language understanding + +**Composition Examples:** + +See more examples on our [composition 
docs](https://cua.ai/docs/agent-sdk/supported-agents/composed-agents). + +```python +# Use OpenAI's GPT-5 for planning with specialized grounding +agent = ComputerAgent(model="huggingface-local/HelloKKMe/GTA1-7B+openai/gpt-5") + +# Composition via OmniParser +agent = ComputerAgent(model="omniparser+openai/gpt-4o") + +# Combine state-of-the-art grounding with powerful reasoning +agent = ComputerAgent(model="huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929") + +# Combine two different vision models for enhanced capabilities +agent = ComputerAgent(model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B+openai/gpt-4o") + +# Use the built-in Moondream3 grounding with any planning model +agent = ComputerAgent(model="moondream3+openai/gpt-4o") +``` + ### Model IDs
@@ -229,9 +261,11 @@ The following table shows which capabilities are supported by each model: | [Claude Sonnet/Haiku](https://docs.claude.com/en/docs/agents-and-tools/tool-use/computer-use-tool#how-to-implement-computer-use) | `anthropic/claude-sonnet-4-5`, `anthropic/claude-haiku-4-5` | | [OpenAI CU Preview](https://platform.openai.com/docs/models/computer-use-preview) | `openai/computer-use-preview` | | [GLM-V](https://huggingface.co/THUDM/glm-4v-9b) | `openrouter/z-ai/glm-4.5v`, `huggingface-local/zai-org/GLM-4.5V` | +| [Qwen3 VL](https://huggingface.co/collections/Qwen/qwen3-vl) | `openrouter/qwen/qwen3-vl-235b-a22b-instruct` | | [Gemini CU Preview](https://ai.google.dev/gemini-api/docs/computer-use) | `gemini-2.5-computer-use-preview` | | [InternVL](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | `huggingface-local/OpenGVLab/InternVL3_5-{1B,2B,4B,8B,...}` | | [UI-TARS](https://huggingface.co/ByteDance-Seed/UI-TARS-1.5-7B) | `huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B` | +| [UI-TARS-2](https://cua.ai/dashboard/vlm-router) | `cua/bytedance/ui-tars-2` | | [OpenCUA](https://huggingface.co/xlangai/OpenCUA-7B) | `huggingface-local/xlangai/OpenCUA-{7B,32B}` | | [GTA](https://huggingface.co/HelloKKMe/GTA1-7B) | `huggingface-local/HelloKKMe/GTA1-{7B,32B,72B}` | | [Holo](https://huggingface.co/Hcompany/Holo1.5-3B) | `huggingface-local/Hcompany/Holo1.5-{3B,7B,72B}` | @@ -273,7 +307,7 @@ try: # Click and type await computer.interface.left_click(100, 100) - await computer.interface.type("Hello!") + await computer.interface.type_text("Hello!") finally: await computer.close() ``` @@ -331,6 +365,46 @@ pip install cua-som Learn more in the [SOM documentation](./libs/python/som/README.md). +# Recent Updates + +## 2025 + +### September 2025 + +- **Hack the North Competition**: First benchmark-driven hackathon track with guaranteed YC interview prize. 
Winner achieved 68.3% on OSWorld-Tiny ([Blog Post](https://www.cua.ai/blog/hack-the-north)) +- **Global Hackathon Launch**: Ollama Γ— Cua global online competition for creative local/hybrid agents + +### August 2025 + +- **v0.4 Release - Composite Agents**: Mix grounding + planning models with `+` operator (e.g., `"GTA-7B+GPT-4o"`) ([Blog Post](https://www.cua.ai/blog/composite-agents)) +- **HUD Integration**: One-line benchmarking on OSWorld-Verified with live trace visualization ([Blog Post](https://www.cua.ai/blog/hud-agent-evals)) +- **Human-in-the-Loop**: Interactive agent mode with `human/human` model string +- **Web-Based Computer Use**: Browser-based agent execution ([Blog Post](https://www.cua.ai/blog/bringing-computer-use-to-the-web)) + +### June 2025 + +- **Windows Sandbox Support**: Native Windows agent execution ([Blog Post](https://www.cua.ai/blog/windows-sandbox)) +- **Containerization Evolution**: From Lume to full Docker support ([Blog Post](https://www.cua.ai/blog/lume-to-containerization)) +- **Sandboxed Python Execution**: Secure code execution in agent workflows + +### May 2025 + +- **Cua Cloud Containers**: Production-ready cloud deployment with elastic scaling ([Blog Post](https://www.cua.ai/blog/introducing-cua-cloud-containers)) +- **Trajectory Viewer**: Visual debugging tool for agent actions ([Blog Post](https://www.cua.ai/blog/trajectory-viewer)) +- **Training Data Collection**: Tools for creating computer-use training datasets ([Blog Post](https://www.cua.ai/blog/training-computer-use-models-trajectories-1)) +- **App-Use Framework**: Mobile and desktop app automation capabilities + +### April 2025 + +- **Agent Framework v0.4**: Unified API for 100+ model configurations +- **UI-TARS Integration**: Local inference support for ByteDance's desktop-optimized model +- **Blog Series**: "Build Your Own Operator" tutorials ([Part 1](https://www.cua.ai/blog/build-your-own-operator-on-macos-1) | [Part 2](https://www.cua.ai/blog/build-your-own-operator-on-macos-2)) + +### March 2025 + +- **Initial Public Release**: Core Agent SDK and Computer SDK +- **Lume VM Manager**: macOS VM management tool for local development + # Resources - [Cua Blog](https://www.cua.ai/blog) diff --git a/blog/app-use.md b/blog/app-use.md index 68cf9c9b..1985713c 100644 --- a/blog/app-use.md +++ b/blog/app-use.md @@ -25,7 +25,7 @@ desktop = computer.create_desktop_from_apps(["Safari", "Notes"]) # Your agent can now only see and interact with these apps agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[desktop] ) ``` @@ -94,7 +94,7 @@ async def main(): # Initialize an agent agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[desktop] ) @@ -160,7 +160,7 @@ async def automate_iphone(): # Initialize an agent for iPhone automation agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[my_iphone] ) diff --git a/blog/build-your-own-operator-on-macos-1.md b/blog/build-your-own-operator-on-macos-1.md index dd075e01..f4e8ff23 100644 --- a/blog/build-your-own-operator-on-macos-1.md +++ b/blog/build-your-own-operator-on-macos-1.md @@ -8,7 +8,7 @@ In this first blogpost, we'll learn how to build our own Computer-Use Operator u - **computer-use-preview** is OpenAI's specialized language model trained to understand and interact with computer interfaces through screenshots. 
- A **Computer-Use Agent** is an AI agent that can control a computer just like a human would - clicking buttons, typing text, and interacting with applications. -Our Operator will run in an isolated macOS VM, by making use of our [cua-computer](https://github.com/trycua/cua/tree/main/libs/computer) package and [lume virtualization CLI](https://github.com/trycua/cua/tree/main/libs/lume). +Our Operator will run in an isolated macOS VM, by making use of our [cua-computer](https://github.com/trycua/cua/tree/main/libs/python/computer) package and [lume virtualization CLI](https://github.com/trycua/cua/tree/main/libs/lume). Check out what it looks like to use your own Operator from a Gradio app: @@ -294,7 +294,7 @@ This design keeps everything organized and safe. The AI can only interact with t ### Prerequisites 1. **Lume CLI Setup** - For installing the standalone lume binary, run the following command from a terminal, or download the [latest pkg](https://github.com/trycua/cua/releases/latest/download/lume.pkg.tar.gz). + For installing the standalone lume binary, run the following command from a terminal, or download the [latest pkg](https://github.com/trycua/cua/releases/download/lume-v0.2.22/lume-darwin.pkg.tar.gz). ```bash sudo /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" @@ -567,10 +567,10 @@ In a production setting, you would wrap the action-response cycle in a loop, han ### Next Steps -In the next blogpost, we'll introduce our Agent framework which abstracts away all these tedious implementation steps. This framework provides a higher-level API that handles the interaction loop between OpenAI's computer-use model and the macOS sandbox, allowing you to focus on building sophisticated applications rather than managing the low-level details we've explored here. Can't wait? Check out the [cua-agent](https://github.com/trycua/cua/tree/main/libs/agent) package! +In the next blogpost, we'll introduce our Agent framework which abstracts away all these tedious implementation steps. This framework provides a higher-level API that handles the interaction loop between OpenAI's computer-use model and the macOS sandbox, allowing you to focus on building sophisticated applications rather than managing the low-level details we've explored here. Can't wait? Check out the [cua-agent](https://github.com/trycua/cua/tree/main/libs/python/agent) package! ### Resources - [OpenAI Computer-Use docs](https://platform.openai.com/docs/guides/tools-computer-use) -- [cua-computer](https://github.com/trycua/cua/tree/main/libs/computer) +- [cua-computer](https://github.com/trycua/cua/tree/main/libs/python/computer) - [lume](https://github.com/trycua/cua/tree/main/libs/lume) diff --git a/blog/build-your-own-operator-on-macos-2.md b/blog/build-your-own-operator-on-macos-2.md index bf521b75..2c3d8ccb 100644 --- a/blog/build-your-own-operator-on-macos-2.md +++ b/blog/build-your-own-operator-on-macos-2.md @@ -145,9 +145,9 @@ While the core concept remains the same across all agent loops, different AI mod | Agent Loop | Supported Models | Description | Set-Of-Marks | |:-----------|:-----------------|:------------|:-------------| | `AgentLoop.OPENAI` | β€’ `computer_use_preview` | Use OpenAI Operator CUA Preview model | Not Required | -| `AgentLoop.ANTHROPIC` | β€’ `claude-3-5-sonnet-20240620`
β€’ `claude-3-7-sonnet-20250219` | Use Anthropic Computer-Use Beta Tools | Not Required | +| `AgentLoop.ANTHROPIC` | β€’ `claude-sonnet-4-5-20250929`
β€’ `claude-3-7-sonnet-20250219` | Use Anthropic Computer-Use Beta Tools | Not Required | | `AgentLoop.UITARS` | β€’ `ByteDance-Seed/UI-TARS-1.5-7B` | Uses ByteDance's UI-TARS 1.5 model | Not Required | -| `AgentLoop.OMNI` | β€’ `claude-3-5-sonnet-20240620`
β€’ `claude-3-7-sonnet-20250219`
β€’ `gpt-4.5-preview`
β€’ `gpt-4o`
β€’ `gpt-4`
β€’ `phi4`
β€’ `phi4-mini`
β€’ `gemma3`
β€’ `...`
β€’ `Any Ollama or OpenAI-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser | +| `AgentLoop.OMNI` | β€’ `claude-sonnet-4-5-20250929`
β€’ `claude-3-7-sonnet-20250219`
β€’ `gpt-4.5-preview`
β€’ `gpt-4o`
β€’ `gpt-4`
β€’ `phi4`
β€’ `phi4-mini`
β€’ `gemma3`
β€’ `...`
β€’ `Any Ollama or OpenAI-compatible model` | Use OmniParser for element pixel-detection (SoM) and any VLMs for UI Grounding and Reasoning | OmniParser | Each loop handles the same basic pattern we implemented manually in Part 1: @@ -171,7 +171,7 @@ The `cua-agent` framework provides multiple agent loop implementations to abstra - **AgentLoop.OMNI**: The most flexible option that works with virtually any vision-language model including local and open-source ones. Perfect for cost-effective development or when you need to use models without native computer-use capabilities. -These abstractions allow you to easily switch between providers without changing your application code. All loop implementations are available in the [cua-agent GitHub repository](https://github.com/trycua/cua/tree/main/libs/agent/agent/providers). +These abstractions allow you to easily switch between providers without changing your application code. All loop implementations are available in the [cua-agent GitHub repository](https://github.com/trycua/cua/tree/main/libs/python/agent). Choosing the right agent loop depends not only on your API access and technical requirements but also on the specific tasks you need to accomplish. To make an informed decision, it's helpful to understand how these underlying models perform across different computing environments – from desktop operating systems to web browsers and mobile interfaces. @@ -191,7 +191,7 @@ The performance of different Computer-Use models varies significantly across tas - **AgentLoop.OPENAI**: Choose when you have OpenAI Tier 3 access and need the most capable computer-use agent for web-based tasks. Uses the same [OpenAI Computer-Use Loop](https://platform.openai.com/docs/guides/tools-computer-use) as Part 1, delivering strong performance on browser-based benchmarks. -- **AgentLoop.ANTHROPIC**: Ideal for users with Anthropic API access who need strong reasoning capabilities with computer-use abilities. Works with `claude-3-5-sonnet-20240620` and `claude-3-7-sonnet-20250219` models following [Anthropic's Computer-Use tools](https://docs.anthropic.com/en/docs/agents-and-tools/computer-use#understanding-the-multi-agent-loop). +- **AgentLoop.ANTHROPIC**: Ideal for users with Anthropic API access who need strong reasoning capabilities with computer-use abilities. Works with `claude-sonnet-4-5-20250929` and `claude-3-7-sonnet-20250219` models following [Anthropic's Computer-Use tools](https://docs.anthropic.com/en/docs/agents-and-tools/computer-use#understanding-the-multi-agent-loop). - **AgentLoop.UITARS**: Best for scenarios requiring more powerful OS/desktop, and latency-sensitive automation, as UI-TARS-1.5 leads in OS capabilities benchmarks. Requires running the model locally or accessing it through compatible endpoints (e.g. on Hugging Face). 
@@ -268,7 +268,7 @@ from agent import ComputerAgent async def run_multi_task_workflow(): async with Computer() as macos_computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[macos_computer] ) @@ -674,7 +674,7 @@ With the basics covered, you might want to explore: ### Resources -- [cua-agent GitHub repository](https://github.com/trycua/cua/tree/main/libs/agent) +- [cua-agent GitHub repository](https://github.com/trycua/cua/tree/main/libs/python/agent) - [Agent Notebook Examples](https://github.com/trycua/cua/blob/main/notebooks/agent_nb.ipynb) - [OpenAI Agent SDK Specification](https://platform.openai.com/docs/api-reference/responses) - [Anthropic API Documentation](https://docs.anthropic.com/en/api/getting-started) diff --git a/blog/cloud-windows-ga-macos-preview.md b/blog/cloud-windows-ga-macos-preview.md new file mode 100644 index 00000000..7f778daf --- /dev/null +++ b/blog/cloud-windows-ga-macos-preview.md @@ -0,0 +1,120 @@ +# Cloud Windows Sandboxes GA + macOS Preview + +If you've been building with our `cua` libraries, you might've hit a limitation with local computer-use sandboxes: to run agents on Windows or macOS, you need to be on that OS - Windows Sandbox for Windows, Apple Virtualization for macOS. The only cross-platform option is Linux on Docker, which limits you to virtualizing Linux environments ([see all local options here](https://cua.ai/docs/computer-sdk/computers)). + +Today the story changes - we're announcing general availability of **Cloud Windows Sandboxes** and opening early preview access for **Cloud macOS Sandboxes**. + +## Cloud Windows Sandboxes: Now GA + +![Cloud Windows Sandboxes](https://github.com/user-attachments/assets/db15f4c4-70a4-425a-a264-82e629074de7) + +Cloud Windows Sandboxes are now generally available. You get a full Windows 11 desktop in your browser with Edge and Python pre-installed, working seamlessly with all our [Computer-Use libraries](https://github.com/trycua/cua) for RPA, UI automation, code execution, and agent development. + +**What's new with this release:** + +- Hot-start under 1 second +- Direct noVNC over HTTPS under our sandbox.cua.ai domain +- 3 sandbox sizes available: + +| Size | CPU | RAM | Storage | +| ------ | ------- | ----- | ---------- | +| Small | 2 cores | 8 GB | 128 GB SSD | +| Medium | 4 cores | 16 GB | 128 GB SSD | +| Large | 8 cores | 32 GB | 256 GB SSD | + +
+ +
+ +**Pricing:** Windows Sandboxes start at 8 credits/hour (Small), 15 credits/hour (Medium), or 31 credits/hour (Large). + +## Cloud macOS Sandboxes: Now in Preview + +Running macOS locally comes with challenges: 30GB golden images, a maximum of 2 sandboxes per host, and unpredictable compatibility issues. With Cloud macOS Sandboxes, we provision bare-metal macOS hosts (M1, M2, M4) on-demandβ€”giving you full desktop access without the overhead of managing local sandboxes. + +![macOS Preview Waitlist](https://github.com/user-attachments/assets/343c9a3f-59d8-4b1a-bba8-6af91e8a9cf0) + +**Preview access:** Invite-only. [Join the waitlist](https://cua.ai/macos-waitlist) if you're building agents for macOS workflows. + +## Getting Started Today + +Sign up at [cua.ai/signin](https://cua.ai/signin) and grab your API key from the dashboard. Then connect to a sandbox: + +```python +from computer import Computer + +computer = Computer( + os_type="windows", # or "macos" + provider_type="cloud", + name="my-sandbox", + api_key="your-api-key" +) + +await computer.run() +``` + +Manage existing sandboxes: + +```python +from computer.providers.cloud.provider import CloudProvider + +provider = CloudProvider(api_key="your-api-key") +async with provider: + sandboxes = await provider.list_vms() + await provider.run_vm("my-sandbox") + await provider.stop_vm("my-sandbox") +``` + +Run an agent on Windows to automate a workflow: + +```python +from agent import ComputerAgent + +agent = ComputerAgent( + model="anthropic/claude-sonnet-4-5-20250929", + tools=[computer], + max_trajectory_budget=5.0 +) + +response = await agent.run( + "Open Excel, create a sales report with this month's data, and save it to the desktop" +) +``` + +## FAQs + +
+Why not just use local Windows Sandbox? + +Local Windows Sandbox resets on every restart. No persistence, no hot-start, and you need Windows Pro. Our sandboxes persist state, hot-start in under a second, and work from any OS. + +
+ +
+What happens to my work when I stop a sandbox? + +Everything persists. Files, installed software, browser profilesβ€”it's all there when you restart. Only pay for runtime, not storage. + +
+ +
+How's the latency for UI automation? + +We run in 4 regions so you can pick what's closest. The noVNC connection is optimized for automation, not video streaming. Your agent sees crisp screenshots, not compressed video. + +
+ +
+Are there software restrictions? + +No. Full admin access on both platforms. Install whatever you needβ€”Visual Studio, Photoshop, custom enterprise software. It's your sandbox. + +
+ +## Need help? + +If you hit issues getting either platform working, reach out in [Discord](https://discord.gg/cua-ai). We respond fast and fix based on what people actually use. + +--- + +Get started at [cua.ai](https://cua.ai) or [join the macOS waitlist](https://cua.ai/macos-waitlist). diff --git a/blog/composite-agents.md b/blog/composite-agents.md index 66af1869..2b8a7df3 100644 --- a/blog/composite-agents.md +++ b/blog/composite-agents.md @@ -14,12 +14,12 @@ This is the kind of problem that makes you wonder if we're building the future o Agent framework 0.4 solves this by doing something radical: making all these different models speak the same language. -Instead of writing separate code for each model's peculiarities, you now just pick a model with a string like `"anthropic/claude-3-5-sonnet-20241022"` or `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"`, and everything else Just Worksβ„’. Behind the scenes, we handle all the coordinate normalization, token parsing, and image preprocessing so you don't have to. +Instead of writing separate code for each model's peculiarities, you now just pick a model with a string like `"anthropic/claude-sonnet-4-5-20250929"` or `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"`, and everything else Just Worksβ„’. Behind the scenes, we handle all the coordinate normalization, token parsing, and image preprocessing so you don't have to. ```python # This works the same whether you're using Anthropic, OpenAI, or that new model you found on Hugging Face agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", # or any other supported model + model="anthropic/claude-sonnet-4-5-20250929", # or any other supported model tools=[computer] ) ``` diff --git a/blog/computer-use-agents-for-growth-hacking.md b/blog/computer-use-agents-for-growth-hacking.md new file mode 100644 index 00000000..c92dd60d --- /dev/null +++ b/blog/computer-use-agents-for-growth-hacking.md @@ -0,0 +1,172 @@ +# Computer Use Agents for Growth Hacking: The Cua-la Strategy + +_Published on January 16, 2025 by Sarina Li_ + +Esther and Sarina at DevFest Toronto + +Growing a developer-focused product is hard. Traditional marketing doesn't work. Booth rentals cost thousands. Sponsorships cost tens of thousands. + +So we tried something different at Google DevFest Toronto: show up with backpacks full of cute cua-la keychains and see what happens. + +This is the story of how two new hires, a growth engineer and a designer/artist, guerrilla marketed their way through a major tech conference with $200 worth of merch and a post-event automation pipeline. + +## Meet the Team + +**Sarina** (Growth Engineering): Built the post-event automation pipeline that extracts LinkedIn connections and generates personalized messages while you sleep. + +**Esther** (Design + Art): Hand-crafted every piece of artwork, giving life to Cua through illustrations, branding, and yes, extremely cute cua-la keychains. + +The thesis: what if we could draw people in with irresistible physical merch, then use computer use agents to handle all the tedious follow-up work? + +## The cua-la Strategy + +Guerrilla marketing at DevFest Toronto + +Google DevFest Toronto brought together hundreds of developers and AI enthusiasts. We didn't have a booth. We didn't have demos. We showed up with backpacks full of cua-la keychains with the cua.ai logo and started handing them out. + +That's it. Pure guerrilla marketing, the cua-las were absurdly effective. 
+ +People would literally crowd around us, not because they were interested in computer use (at first), but because they wanted a cua-la. We'd pitch Cua while handing out keychains, and suddenly we had an engaged audience! + +DevFest crowd + +### The Magic Moment + +A few people stuck the cua-las on their bags immediately. Then, throughout the event, we started getting approached: + +"Wait, are you the Cua girls?" + +They'd seen the cua-las on someone's bag, asked about it, and tracked us down! The keychains became walking advertisements. + +Hack the North recognition at DevFest + +Even better: two attendees recognized Cua from Hack the North. Our previous event marketing was actually working. People remembered us. + +## Part 2: The Automation (Try It Yourself) + +After DevFest, we had 20+ new LinkedIn connections. Normally, this means hours of: + +- Manually copying names, roles, companies +- Opening each profile to find contact info +- Crafting personalized follow-up messages +- Updating your CRM + +Sarina had a better idea: build the automation we wish existed, then open source it. + +**The automation is live**: [Post-Event Contact Export cookbook](https://cua.ai/docs/example-usecases/post-event-contact-export) + +### How It Works + + + +The agent navigates LinkedIn like a human would: click profile, extract info, navigate back, repeat. But it does it overnight while you sleep. + +The secret sauce: **VM session persistence**. Log into LinkedIn once through Cua's VM and the session stays alive. No captchas, no bot detection, just smooth automation. + + + +Wake up to a clean CSV with: + +- First name, last name +- Current role and company +- LinkedIn profile URLs +- Pre-generated messaging links + +Then use that data to craft personalized messages. Sarina wrote unique follow-ups for each person, mentioning specific conversations from DevFest. + +**Works for any platform**: LinkedIn, X/Twitter, or wherever your connections are. The cookbook includes full setup instructions and customizable code. + +## The Results + +**Cost Breakdown** + +- Booth rental: $0 (didn't have one) +- Sponsorship: $0 (didn't buy one) +- cua-la keychains: ~$200 +- Automation: Built by Sarina in a few hours post-event +- **Total spend: $200** + +**What We Got** + +- People crowding around us for cua-las +- Walking advertisements on bags throughout the event +- Instant brand recognition ("Are you the Cua girls?") +- Two people who remembered us from Hack the North +- 20+ quality connections extracted and messaged within 24 hours +- Several demo requests from personalized follow-ups + +**ROI** +Traditional event marketing at this scale: $5-10K minimum for booth + sponsorship. + +Our approach: $200 + scrappy execution. + +The automation is reusable and will save hours of manual work, and the cua-las created more organic conversations than any booth could have. + +## What Didn't Work (Yet) + +**cua-la Distribution** +We ran out faster than expected! Next time: bigger bag, or limit to one per person. + +**Automation Setup** +The VM login step added friction. "Log in manually first, then run the script" confused some people who wanted to try it themselves. Need better first-run UX. + +**Message Personalization** +While the extraction was automated, I still wrote each follow-up message manually. We're still looking for ways to enrich each message with more context from the event, which is hard to automate. + +## What's Next: NeurIPS 2025 + +NeurIPS is the biggest AI conference of the year.
Thousands of researchers, hundreds of companies. + +**The good news**: We still have one giant bag of cua-las left. They're already packed and ready. + +**The better news**: We're upgrading the automation. + +### The Hypothesis + +The cua-las get people interested. The automation ensures we actually follow through. + +Most event marketing fails at the follow-up stage. You collect business cards, connect on LinkedIn, and then... nothing. The moment passes. People forget. + +With Cua handling the mechanical work (data organization, connection tracking, follow-up scheduling), we can focus on the human part: genuine conversations, valuable introductions, and actually helping people. + +## The Framework: Cute Merch + Smart Automation + +Traditional event marketing: show up, pitch, collect cards. + +Our approach: combine two forces that shouldn't work together but do. + +**The Physical Hook** + +- Make something people actually want (not another branded pen) +- Hand-crafted, memorable, Instagram-worthy +- Turns attendees into walking billboards +- Creates natural conversation starters + +**The Digital Follow-Through** + +- Automate the tedious post-event work +- Extract connections while you sleep +- Personalize follow-ups with real context +- Actually close the loop before the moment passes + +**Why It Works** +The cua-las get you in the door. The automation ensures you don't waste the opportunity. + +Most companies nail one or the other: + +- Great merch, terrible follow-up β†’ missed opportunities +- Amazing automation, boring presence β†’ no one cares + +Do both, and you create a flywheel: each event builds brand recognition for the next, while automation ensures maximum value from every connection. + +See you at NeurIPS 2025! + +--- + +_Want to build your own growth hacking automations? Check out [Cua on GitHub](https://github.com/trycua/cua) or join our [Discord](https://discord.gg/cua) to share your experiments. cua-las not included (yet)._ diff --git a/blog/cua-playground-preview.md b/blog/cua-playground-preview.md new file mode 100644 index 00000000..2f3d661f --- /dev/null +++ b/blog/cua-playground-preview.md @@ -0,0 +1,86 @@ +# Cua Playground: Agents + Sandboxes in Your Browser + +Building computer-use agents means constant iterationβ€”writing code, deploying to a sandbox, testing behavior, debugging issues, then repeating the cycle. Every test requires switching between your code editor, terminal, and VNC viewer. Want to try a different prompt? Edit your code, redeploy, and wait for the agent to restart. It works, but it's slow. + +Today we're launching the **Cua Playground**: a browser-based environment for testing computer-use agents without writing code. Send messages to your sandboxes, watch them execute in real-time, and iterate on prompts instantlyβ€”all from your dashboard at cua.ai. + +![Cua Playground](https://github.com/user-attachments/assets/af1071ba-3df3-4e4b-aafb-df8c3d00b0a5) + +**What's new with this release:** + +- Instant testingβ€”send messages to any running sandbox directly from your browser +- Real-time executionβ€”watch your agent work with live tool call updates and screenshots +- Multi-model supportβ€”test with Claude Sonnet 4.5, Haiku 4.5, and more +- Persistent chat historyβ€”conversations save automatically to local storage + +The Playground connects to your existing Cua sandboxesβ€”the same ones you use with the Agent SDK. Select a running sandbox and a model, then start chatting. 
The agent uses computer-use tools (mouse, keyboard, bash, editor) to complete your tasks, and you see every action it takes. + +## Getting Started Today + +
+ +
+
+
+Sign up at [cua.ai/signin](https://cua.ai/signin) and grab your API key from the dashboard. Then navigate to the Playground:
+
+1. Navigate to Dashboard > Playground
+2. Select a sandbox from the dropdown (must be "running" status)
+3. Choose a model (we recommend Claude Sonnet 4.5 to start)
+4. Send a message: "Take a screenshot and describe what you see"
+5. Watch the agent execute computer actions in real-time
+
+Example use cases:
+
+**Prompt Testing**
+```
+❌ "Check the website"
+✅ "Navigate to example.com in Firefox and take a screenshot of the homepage"
+```
+
+**Model Comparison**
+Run the same task with different models to compare quality, speed, and cost.
+
+**Debugging Agent Behavior**
+1. Send: "Find the login button and click it"
+2. View tool calls to see each mouse movement
+3. Check screenshots to verify the agent found the right element
+4. Adjust your prompt based on what you observe
+
+## FAQs
+
+Do I need to know how to code? + +No. The Playground is designed for testing agent behavior without writing code. However, for production deployments, you'll need to use the Agent SDK (Python/TypeScript). + +
+ +
+Does this replace the Agent SDK? + +No. The Playground is for rapid testing and experimentation. For production deployments, scheduled tasks, or complex workflows, use the Agent SDK. + +
+ +
+How much does it cost? + +Playground requests use the same credit system as Agent SDK requests. You're charged for model inference (varies by model) and sandbox runtime (billed per hour while running). + +
+ +
+Why is my sandbox not showing up? + +The sandbox must have `status = "running"` to appear in the dropdown. Check Dashboard > Sandboxes to verify status. If stopped, click "Start" and wait ~30 seconds for it to become available. + +
+
+
+## Need help?
+
+If you hit issues getting the Playground working, reach out in [Discord](https://discord.gg/cua-ai). We respond fast and fix based on what people actually use.
+
+---
+
+Get started at [cua.ai](https://cua.ai) or try the Playground at [cua.ai/dashboard/playground](https://cua.ai/dashboard/playground).
diff --git a/blog/cua-vlm-router.md b/blog/cua-vlm-router.md
new file mode 100644
index 00000000..9c980301
--- /dev/null
+++ b/blog/cua-vlm-router.md
@@ -0,0 +1,181 @@
+# Cua VLM Router: One Provider for All Your Computer-Use Models
+
+If you've been building computer-use agents, you know the reality: every model provider has its own specification and deployment process. Anthropic has one API format, OpenAI another, Google something else entirely. Want to try a Hugging Face model? That's a completely different setup. Self-hosting? Even more complexity. Each provider requires learning their specific API, managing their credentials, and adapting your code to their particular requirements.
+
+Today we're launching the **Cua VLM Router**: a managed inference API that gives you unified access to multiple vision-language model providers through a single API key. We're starting with Anthropic's Claude models (Sonnet 4.5 and Haiku 4.5)—some of the most loved and widely-used computer-use models in the Cua ecosystem—with more providers coming soon.
+
+![Cua VLM Router Banner](https://github.com/user-attachments/assets/1b978f62-2cae-4cf7-932a-55ac8c8f2e06)
+
+## What You Get
+
+The Cua VLM Router handles the infrastructure so you can focus on building:
+
+**Single API Key**
+
+- One key for all model providers (no juggling multiple credentials)
+- Works for both model inference and sandbox access
+- Manage everything from one dashboard at cua.ai
+
+**Smart Routing**
+
+- Automatic provider selection for optimal availability and performance
+- For Anthropic models, we route to the best provider (Anthropic, AWS Bedrock, or Microsoft Foundry)
+- No configuration needed—just specify the model and we handle the rest
+
+**Cost Tracking & Optimization**
+
+- Unified usage dashboard across all models
+- Real-time credit balance tracking
+- Detailed cost breakdown per request (gateway cost + upstream cost)
+
+**Production-Ready**
+
+- OpenAI-compatible API (drop-in replacement for existing code)
+- Full streaming support with Server-Sent Events
+- Metadata about routing decisions in every response
+
+## Available Models (Launch)
+
+We're starting with Anthropic's latest Claude models:
+
+| Model                             | Best For                           |
+| --------------------------------- | ---------------------------------- |
+| `cua/anthropic/claude-sonnet-4.5` | General-purpose tasks, recommended |
+| `cua/anthropic/claude-haiku-4.5`  | Fast responses, cost-effective     |
+
+## How It Works
+
+When you request an Anthropic model through Cua, we automatically route to the best available provider—whether that's Anthropic directly, AWS Bedrock, or Microsoft Foundry. You just specify `cua/anthropic/claude-sonnet-4.5`, and we handle the provider selection, failover, and optimization behind the scenes. No need to manage multiple accounts or implement fallback logic yourself.
+
+## Getting Started
+
+Sign up at [cua.ai/signin](https://cua.ai/signin) and create your API key from **Dashboard > API Keys > New API Key** (save it immediately—you won't see it again).
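+
+If you prefer not to hard-code the key, keep it in your shell environment (or a `.env` file) and read it at runtime. A minimal sketch; the `CUA_API_KEY` variable name follows the convention used elsewhere in the Cua docs, and loading a `.env` file (for example with `python-dotenv`) is optional:
+
+```python
+import os
+
+# Read the Cua API key from the environment so it never lives in source control.
+# Optionally populate the environment from a .env file first (e.g. with python-dotenv).
+cua_api_key = os.environ["CUA_API_KEY"]
+```
+
+The examples below pass the key explicitly via `api_key`; reading it from the environment as above works the same way.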
+
+
+Use it with the Agent SDK (make sure to set your environment variable):
+
+```python
+import asyncio
+from agent import ComputerAgent
+from computer import Computer
+
+async def main():
+    # Initialize cloud computer
+    computer = Computer(
+        os_type="linux",
+        provider_type="cloud",
+        name="your-container-name",
+        api_key="your-cua-api-key"
+    )
+
+    # Initialize agent with Claude Sonnet 4.5
+    agent = ComputerAgent(
+        tools=[computer],
+        model="cua/anthropic/claude-sonnet-4.5",
+        api_key="your-cua-api-key",
+        instructions="You are a helpful assistant that can control computers",
+        only_n_most_recent_images=3
+    )
+
+    # Run a task
+    async for result in agent.run("Open a browser and search for Python tutorials"):
+        print(result)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Migration is Simple
+
+Already using Anthropic directly? Just add the `cua/` prefix:
+
+**Before:**
+
+```bash
+export ANTHROPIC_API_KEY="sk-ant-..."
+```
+
+```python
+agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929")
+```
+
+**After:**
+
+```bash
+export CUA_API_KEY="sk_cua-api01_..."
+```
+
+```python
+agent = ComputerAgent(model="cua/anthropic/claude-sonnet-4.5")
+```
+
+Same code structure. No other changes needed.
+
+## Direct API Access
+
+The router exposes an OpenAI-compatible API at `https://inference.cua.ai/v1`:
+
+```bash
+curl -X POST https://inference.cua.ai/v1/chat/completions \
+  -H "Authorization: Bearer ${CUA_API_KEY}" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "anthropic/claude-sonnet-4.5",
+    "messages": [{"role": "user", "content": "Hello!"}],
+    "stream": true
+  }'
+```
+
+Works with any OpenAI-compatible client library.
+
+## FAQs
+
+
+Do I still need provider API keys?
+
+No. Cua manages all provider API keys and infrastructure. You only need one Cua API key for everything—model inference and sandbox access.
+
+ +
+How does pricing work? + +Requests are billed in credits, deducted from your Cua account balance. Every response includes both the Cua gateway cost and the actual upstream API cost for transparency. + +
+ +
+
+Can I still use my own Anthropic key (BYOK)?
+
+Yes. The Agent SDK still supports direct provider access. Just use `anthropic/claude-sonnet-4-5-20250929` instead of the `cua/` prefix and set your `ANTHROPIC_API_KEY`. See [Supported Model Providers](https://cua.ai/docs/agent-sdk/supported-model-providers/) for details.
+
+ +
+What about other providers? + +We're starting with Anthropic and adding more providers based on what people actually use. Request access to specific models in [Discord](https://discord.gg/cua-ai). + +
+ +
+Does streaming work? + +Yes. Set `"stream": true` in your request to receive Server-Sent Events. Works identically to OpenAI's streaming API. + +
+ +## What's Next + +This is just the beginning. We're actively iterating based on feedback: + +- Additional model providers +- Custom model routing rules +- Usage alerts and budget controls +- Team collaboration features + +If there's a model or feature you need, let us know in [Discord](https://discord.gg/cua-ai). + +## Need Help? + +- **Documentation**: [cua.ai/docs/agent-sdk/supported-model-providers/cua-vlm-router](https://cua.ai/docs/agent-sdk/supported-model-providers/cua-vlm-router) +- **Quickstart Guide**: [cua.ai/docs/get-started/quickstart](https://cua.ai/docs/get-started/quickstart) +- **Discord Community**: [discord.gg/cua-ai](https://discord.gg/cua-ai) + +--- + +Get started at [cua.ai](https://cua.ai) or check out the [VLM Router docs](https://cua.ai/docs/agent-sdk/supported-model-providers/cua-vlm-router). diff --git a/blog/hud-agent-evals.md b/blog/hud-agent-evals.md index d28ab27e..594cea2c 100644 --- a/blog/hud-agent-evals.md +++ b/blog/hud-agent-evals.md @@ -58,7 +58,7 @@ await run_full_dataset( # Or test on SheetBench (50 spreadsheet tasks) await run_full_dataset( dataset="hud-evals/SheetBench-V2", - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", split="train[:2]" ) ``` diff --git a/blog/introducing-cua-cli.md b/blog/introducing-cua-cli.md new file mode 100644 index 00000000..7c3742e6 --- /dev/null +++ b/blog/introducing-cua-cli.md @@ -0,0 +1,264 @@ +# Introducing the Cua CLI: Manage Cloud Sandboxes from Your Terminal + +If you've been using our Cloud Sandboxes, you've probably been managing them through the web dashboard - clicking through forms to create instances, copying credentials, manually starting and stopping sandboxes. It works, but it's not exactly built for power users like yourself. + +Today we're launching the **Cua CLI**: a command-line interface that brings the full power of our Cloud Sandbox platform to your terminal. Create, manage, and connect to Linux, Windows, or macOS sandboxes in secondsβ€”all from a single command. + +![Cua CLI Banner](https://github.com/user-attachments/assets/f8358acf-9194-46ee-b9e3-50cfcff5e489) + +## What You Can Do + +The Cua CLI handles everything you need to work with Cloud Sandboxes: + +**Authentication** + +- Browser-based OAuth login with automatic credential storage +- Direct API key support for CI/CD pipelines +- Export credentials to `.env` files for SDK integration + +**Sandbox Management** + +- Create sandboxes with your choice of OS, size, and region +- List all your sandboxes with status and connection details +- Start, stop, restart, and delete sandboxes +- Open remote desktop (VNC) connections directly in your browser + +**Two Command Styles** +The CLI supports both flat and grouped command structuresβ€”use whichever fits your workflow: + +```bash +# Grouped style (explicit & clear) +cua sb ls +cua sb create --os linux --size small --region north-america +cua sb vnc my-sandbox + +# Flat style (quick & concise) +cua ls +cua create --os linux --size small --region north-america +cua vnc my-sandbox +``` + +Both styles work identically. The CLI shows grouped commands in help by default, but all flat commands remain available for backwards compatibility. 
+ +## Installation + +One command installs everything (includes Bun runtime + Cua CLI): + +```bash +# macOS/Linux +curl -LsSf https://cua.ai/cli/install.sh | sh + +# Windows +powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" +``` + +Or install via npm if you prefer: + +```bash +npm install -g @trycua/cli +``` + +## Getting Started + +Authenticate with your Cua account: + +```bash +# Interactive browser login (recommended) +cua auth login + +# Or provide your API key directly +cua auth login --api-key sk-your-api-key-here +``` + +Create a sandbox: + +```bash +cua sb create --os linux --size small --region north-america +# Sandbox created and ready: my-sandbox-abc123 +# Password: secure-password-here +# Host: my-sandbox-abc123.sandbox.cua.ai +``` + +List your sandboxes: + +```bash +cua sb list +# NAME STATUS HOST +# my-sandbox-abc123 running my-sandbox-abc123.sandbox.cua.ai +# test-windows-456 stopped test-windows-456.sandbox.cua.ai +``` + +Open a remote desktop: + +```bash +cua sb vnc my-sandbox-abc123 +# Opens your browser to the VNC interface with password pre-filled +``` + +## SDK Integration + +Export your API key to a `.env` file for seamless SDK integration: + +```bash +cd my-project +cua auth env +# Wrote /path/to/my-project/.env +``` + +Then use it with our Python or TypeScript SDKs: + +```python +from computer import Computer + +computer = Computer( + os_type="linux", + provider_type="cloud", + name="my-sandbox-abc123", + api_key="your-api-key" # Or load from .env +) + +await computer.run() +``` + +## Sandbox Sizes & Regions + +Create sandboxes in the size and region that fits your needs: + +**Sizes:** + +- `small` - 2 cores, 8 GB RAM, 128 GB SSD +- `medium` - 4 cores, 16 GB RAM, 128 GB SSD +- `large` - 8 cores, 32 GB RAM, 256 GB SSD + +**Regions:** + +- `north-america` +- `europe` +- `asia-pacific` +- `south-america` + +**OS Options:** + +- `linux` - Ubuntu with XFCE desktop +- `windows` - Windows 11 with Edge and Python +- `macos` - macOS (preview access) + +## Example Workflows + +**Quick Testing Environment** + +```bash +# Spin up a sandbox, test something, tear it down +cua sb create --os linux --size small --region north-america +# ... do your testing ... +cua sb delete my-sandbox-abc123 +``` + +**Persistent Development Sandbox** + +```bash +# Create a sandbox for long-term use +cua sb create --os linux --size medium --region north-america + +# Stop it when not in use (data persists) +cua sb stop my-sandbox-abc123 + +# Start it again when needed +cua sb start my-sandbox-abc123 +``` + +**CI/CD Integration** + +```bash +# Provision sandboxes in your pipeline +export CUA_API_KEY="sk-your-api-key" +cua auth login --api-key "$CUA_API_KEY" +cua sb create --os linux --size large --region north-america + +# Run your tests with the Cua Computer SDK +python run_tests.py + +# Clean up +cua sb delete my-test-sandbox +``` + +## Command Aliases + +We've added aliases for common commands to speed up your workflow: + +```bash +# List aliases +cua list # or: cua ls, cua ps, cua sb list + +# VNC aliases +cua vnc # or: cua open, cua sb vnc +``` + +## FAQs + +
+Can I use this in scripts and CI/CD? + +Yes. All commands support non-interactive mode with `--api-key` flags, and the CLI exits with proper status codes for scripting. The flat command style (`cua list`, `cua create`) is particularly useful for quick scripts. + +
+ +
+Where are my credentials stored? + +API keys are stored in `~/.cua/cli.sqlite` using a local SQLite database. They never leave your machine. Use `cua auth logout` to clear stored credentials. + +
+ +
+What happens to passwords in the output? + +Passwords are hidden by default in `cua list` for security. Use `cua list --show-passwords` to display them when needed. + +
+ +
+Can I manage sandboxes created through the web dashboard? + +Yes. The CLI and dashboard share the same API. Any sandbox you create in the dashboard will show up in `cua list`, and vice versa. + +
+ +
+How do I update the CLI? + +If you installed via script: + +```bash +curl -LsSf https://cua.ai/cli/install.sh | sh +``` + +If you installed via npm: + +```bash +npm install -g @trycua/cli@latest +``` + +
+ +## What's Next + +We're actively iterating based on feedback. Planned features include: + +- SSH key management for secure sandbox access +- Template-based sandbox creation +- Batch operations (start/stop multiple sandboxes) +- Custom sandbox configurations +- Snapshot management + +If there's a feature you need, let us know in [Discord](https://discord.gg/cua-ai). + +## Need Help? + +- **Documentation**: [https://cua.ai/docs/libraries/cua-cli/commands](https://cua.ai/docs/libraries/cua-cli/commands) +- **Installation Guide**: [https://cua.ai/docs/libraries/cua-cli/installation](https://cua.ai/docs/libraries/cua-cli/installation) +- **Discord Community**: [https://discord.gg/cua-ai](https://discord.gg/cua-ai) + +--- + +Get started at [cua.ai](https://cua.ai) or check out the [quickstart guide](https://cua.ai/docs/get-started/quickstart). diff --git a/blog/lume-to-containerization.md b/blog/lume-to-containerization.md index e6e0f134..d420b031 100644 --- a/blog/lume-to-containerization.md +++ b/blog/lume-to-containerization.md @@ -90,7 +90,7 @@ lume run macos-sequoia-vanilla:latest ### Lumier: Docker-Style VM Management -[Lumier](https://github.com/trycua/lumier) works differently. It lets you use Docker commands to manage VMs. But here's the key: **Docker is just for packaging, not for isolation**. +[Lumier](https://github.com/trycua/cua/tree/main/libs/lumier) works differently. It lets you use Docker commands to manage VMs. But here's the key: **Docker is just for packaging, not for isolation**. What makes Lumier useful: diff --git a/blog/neurips-2025-cua-papers.md b/blog/neurips-2025-cua-papers.md new file mode 100644 index 00000000..dd5e9461 --- /dev/null +++ b/blog/neurips-2025-cua-papers.md @@ -0,0 +1,658 @@ +# NeurIPS 2025: 45 Computer-Use Agent Papers You Should Know About + +neurips + +If you're following the computer-use agent space, you already know that NeurIPS is where the most important work gets presented. But with thousands of papers across every area of machine learning, finding the ones relevant to CUAs means hours of filtering through proceedings, skimming abstracts, and hoping you don't miss something important. + +We did that work for you. We're excited to announce that **Cua will be at NeurIPS 2025**, and we've compiled a curated list of **45 papers** focused specifically on Computer-Use Agentsβ€”covering benchmarks, safety, grounding, visual reasoning, and agent architectures. + +## Why This Matters + +Computer-use agents are evolving rapidly. This year's NeurIPS showcases several important developments: + +**The benchmark landscape is maturing.** We're seeing comprehensive evaluations across macOS (macOSWorld), professional tools (VideoCAD), and real-world websites (REAL, TheAgentCompany). These aren't toy problems anymoreβ€”they're measuring what agents can actually do in production environments. + +**Safety is becoming a first-class concern.** Multiple papers (OS-Harm, RiOSWorld, WASP, AgentDAM) are systematically documenting how agents fail when confronted with adversarial inputs, privacy requirements, or misuse scenarios. The findings are sobering: even frontier models often comply with harmful requests. + +**Grounding remains the bottleneck.** Papers like GUI-Actor, GUI-G1, and SE-GUI are pushing the state of the art on mapping language to UI actions. The best approaches are achieving significant gains with surprisingly small models and datasets. 
+ +**Open-source is catching up.** OpenCUA's 72B model hits 45% on OSWorld-Verified, establishing that community-driven development can compete with proprietary systems. + +## Highlights Worth Your Attention + +A few papers stand out for their immediate relevance to anyone building or deploying computer-use agents: + +- **macOSWorld** reveals a dramatic capability gap: proprietary agents achieve 30%+ success on macOS tasks while open-source models struggle below 5%. +- **TheAgentCompany** simulates a software company where agents browse, code, and communicate. The best agent completes 30% of tasks autonomously. +- **WASP** demonstrates that simple prompt injections deceive top-tier models in 86% of cases. +- **GUI-G1** shows that a 3B model can achieve 90.3% on ScreenSpot by fixing issues with chain-of-thought reasoning. + +## Summary Statistics + +| Category | Count | +|----------|-------| +| Benchmarks & Datasets | 18 | +| Safety & Security | 12 | +| Grounding & Visual Reasoning | 14 | +| Agent Architectures & Training | 11 | +| Adversarial Attacks | 8 | + +**Total Papers:** 45 + +## Meet Us at NeurIPS + +We'll be at NeurIPS in San Diego. If you're working on computer-use agents, building applications on top of CUA infrastructure, or just curious about where this space is heading, we'd love to connect. + +- **Book a Meeting**: [cal.com/cua/neurips-slot](https://cal.com/cua/neurips-slot) +- **X/Twitter**: [@trycua](https://x.com/trycua) +- **Discord**: [discord.gg/cua-ai](https://discord.gg/cua-ai) + +--- + +# The Papers + +## 1. macOSWorld: A Multilingual Interactive Benchmark for GUI Agents + +**Summary:** The first comprehensive benchmark for evaluating GUI agents on macOS. Features 202 multilingual interactive tasks across 30 applications (28 macOS-exclusive), with support for 5 languages (English, Chinese, Arabic, Japanese, Russian). Reveals a dramatic gap: proprietary agents achieve 30%+ success rate while open-source models lag below 5%. Also includes safety benchmarking for deception attacks. + +**Key Findings:** +- Proprietary computer-use agents lead at above 30% success rate +- Open-source lightweight models struggle below 5%, highlighting need for macOS domain adaptation +- Multilingual benchmarks expose weaknesses, especially in Arabic (28.8% degradation vs English) +- Deception attacks are a general vulnerability requiring immediate attention + +**Poster:** https://neurips.cc/virtual/2025/poster/117427 + +--- + +## 2. OS-Harm: A Benchmark for Measuring Safety of Computer Use Agents + +**Summary:** A comprehensive safety benchmark built on OSWorld for testing computer-use agents across three harm categories: deliberate user misuse, prompt injection attacks, and model misbehavior. Includes 150 tasks spanning harassment, copyright infringement, disinformation, data exfiltration, and more. Proposes an automated judge achieving high agreement with human annotations (0.76-0.79 F1 score). + +**Key Findings:** +- All tested models (o4-mini, Claude 3.7 Sonnet, Gemini 2.5 Pro) tend to directly comply with many deliberate misuse queries +- Models are relatively vulnerable to static prompt injections +- Models occasionally perform unsafe actions without explicit malicious prompts + +**Poster:** https://neurips.cc/virtual/2025/loc/san-diego/poster/121772 + +--- + +## 3. OpenCUA: Open Foundations for Computer-Use Agents + +**Summary:** A comprehensive open-source framework for scaling computer-use agent data and foundation models. 
Introduces AgentNet, the first large-scale computer-use task dataset spanning 3 operating systems and 200+ applications/websites. OpenCUA-72B achieves 45% success rate on OSWorld-Verified, establishing new state-of-the-art among open-source models. + +**Key Contributions:** +- Annotation infrastructure for capturing human computer-use demonstrations +- AgentNet: large-scale dataset across 3 OSes and 200+ apps +- Scalable pipeline transforming demonstrations into state-action pairs with reflective Chain-of-Thought reasoning +- Models generalize well across domains and benefit from increased test-time computation + +**Poster:** https://neurips.cc/virtual/2025/poster/119771 + +--- + +## 4. Mind2Web 2: Evaluating Agentic Search with Agent-as-a-Judge + +**Summary:** A benchmark of 130 realistic, high-quality, long-horizon tasks for agentic search systems (like Deep Research), requiring real-time web browsing and extensive information synthesis. Constructed with 1000+ hours of human labor. Introduces Agent-as-a-Judge framework using tree-structured rubric design for automated evaluation. + +**Key Findings:** +- OpenAI Deep Research achieves 50-70% of human performance while spending half the time +- First systematic evaluation of ten frontier agentic search systems vs. human performance +- Addresses the challenge of evaluating time-varying, complex answers + +**Poster:** https://neurips.cc/virtual/2025/poster/121798 + +--- + +## 5. Scaling Computer-Use Grounding via User Interface Decomposition and Synthesis + +**Summary:** Addresses GUI groundingβ€”mapping natural language to specific UI actionsβ€”as a critical bottleneck in agent development. Introduces OSWorld-G benchmark (564 annotated samples) and Jedi dataset (4 million synthetic examples), the largest computer-use grounding dataset. Improved grounding directly enhances agentic capabilities, boosting OSWorld performance from 23% to 51%. + +**Key Contributions:** +- OSWorld-G: comprehensive benchmark for diverse grounding tasks (text matching, element recognition, layout understanding, precise manipulation) +- Jedi: 4M examples through multi-perspective task decoupling +- Demonstrates compositional generalization to novel interfaces + +**Poster:** https://neurips.cc/virtual/2025/poster/121759 + +--- + +## 6. RiOSWorld: Benchmarking the Risk of Multimodal Computer-Use Agents + +**Summary:** Evaluates potential safety risks of MLLM-based agents during real-world computer manipulation. Features 492 risky tasks spanning web, social media, multimedia, OS, email, and office software. Categorizes risks into user-originated and environmental risks, evaluating both risk goal intention and completion. + +**Key Findings:** +- Current computer-use agents face significant safety risks in real-world scenarios +- Safety principles designed for dialogue scenarios don't transfer well to computer-use +- Highlights necessity and urgency of safety alignment for computer-use agents + +**Poster:** https://neurips.cc/virtual/2025/poster/117273 + +--- + +## 7. REAL: Benchmarking Autonomous Agents on Deterministic Simulations of Real Websites + +**Summary:** A benchmark featuring high-fidelity, deterministic replicas of 11 widely-used websites across e-commerce, travel, communication, and professional networking. Contains 112 practical tasks requiring both information retrieval and state-changing actions. Enables reproducible evaluation without safety risks. 
+ +**Key Findings:** +- Best frontier language models achieve only 41% success rate +- Highlights critical gaps in autonomous web navigation and task completion +- Supports scalable post-training data generation + +**Poster:** https://neurips.cc/virtual/2025/poster/121619 + +--- + +## 8. SE-GUI: Enhancing Visual Grounding for GUI Agents via Self-Evolutionary Reinforcement Learning + +**Summary:** An RL-based framework for GUI grounding incorporating seed data curation, dense policy gradients, and self-evolutionary reinforcement finetuning using attention maps. With only 3K training samples, the 7B model achieves state-of-the-art on three grounding benchmarks, outperforming UI-TARS-72B by 24.2% on ScreenSpot-Pro. + +**Key Results:** +- 47.3% accuracy on ScreenSpot-Pro with 7B model +- Outperforms 72B models with fraction of training data +- Demonstrates effectiveness of RL for high-resolution, complex environments + +**Poster:** https://neurips.cc/virtual/2025/poster/118788 + +--- + +## 9. TRAP: Targeted Redirecting of Agentic Preferences + +**Summary:** A generative adversarial framework that manipulates agent decision-making using diffusion-based semantic injections. Combines negative prompt degradation with positive semantic optimization. Without model access, produces visually natural images that induce consistent decision biases in agents. + +**Key Findings:** +- Consistently induces decision-level preference redirection on LLaVA-34B, Gemma3, GPT-4o, and Mistral-3.2 +- Outperforms baselines (SPSA, Bandit, standard diffusion) +- Exposes vulnerability: autonomous agents can be misled through visually subtle, semantically-guided manipulations + +**Poster:** https://neurips.cc/virtual/2025/poster/117547 + +--- + +## 10. TheAgentCompany: Benchmarking LLM Agents on Consequential Real World Tasks + +**Summary:** An extensible benchmark simulating a small software company environment where AI agents interact like digital workers: browsing the web, writing code, running programs, and communicating with coworkers. Tests agents on real professional tasks with important implications for industry adoption and labor market effects. + +**Key Findings:** +- Best agent achieves 30% autonomous task completion +- Simpler tasks are solvable autonomously +- More difficult long-horizon tasks remain beyond current systems' reach + +**Poster:** https://neurips.cc/virtual/2025/poster/121705 + +--- + +## 11. VideoGameQA-Bench: Evaluating Vision-Language Models for Video Game Quality Assurance + +**Summary:** A comprehensive benchmark for VLMs in video game QA, encompassing visual unit testing, visual regression testing, needle-in-a-haystack challenges, glitch detection, and bug report generation for both images and videos. Addresses the need for standardized benchmarks in this labor-intensive domain. + +**Key Focus:** +- First benchmark specifically designed for video game QA with VLMs +- Covers wide range of QA activities across images and videos +- Addresses lack of automation in game development workflows + +**Poster:** https://neurips.cc/virtual/2025/poster/121740 + +--- + +## 12. WASP: Benchmarking Web Agent Security Against Prompt Injection Attacks + +**Summary:** End-to-end benchmark for evaluating web agent security against prompt injection attacks. Tests realistic scenarios where even simple, low-effort human-written injections can deceive top-tier AI models including those with advanced reasoning. 
+ +**Key Findings:** +- Attacks partially succeed in up to 86% of cases +- State-of-the-art agents often struggle to fully complete attacker goals +- Reveals "security by incompetence"β€”agents' limitations sometimes prevent full attack success + +**Poster:** https://neurips.cc/virtual/2025/poster/121728 + +--- + +## 13. AgentDAM: Privacy Leakage Evaluation for Autonomous Web Agents + +**Summary:** Measures whether AI web-navigation agents follow the privacy principle of "data minimization"β€”using sensitive information only when truly necessary to complete a task. Simulates realistic web interaction scenarios end-to-end. + +**Key Findings:** +- Agents built on GPT-4, Llama-3, and Claude are prone to inadvertent use of unnecessary sensitive information +- Proposes prompting-based defense that reduces information leakage +- End-to-end benchmarking provides more realistic measure than probing LLMs about privacy + +**Poster:** https://neurips.cc/virtual/2025/poster/121443 + +--- + +## 14. Embodied Web Agents: Bridging Physical-Digital Realms for Integrated Agent Intelligence + +**Summary:** A novel paradigm for AI agents that fluidly bridge embodiment and web-scale reasoning. Creates unified simulation integrating realistic 3D indoor/outdoor environments with functional web interfaces. Tasks include cooking from online recipes, navigating with dynamic map data, and interpreting landmarks using web knowledge. + +**Key Contributions:** +- Unified platform combining 3D environments with web interfaces +- Benchmark spanning cooking, navigation, shopping, tourism, and geolocation +- Reveals significant performance gaps between AI systems and humans + +**Poster:** https://neurips.cc/virtual/2025/poster/121809 + +--- + +## 15. VideoCAD: A Dataset and Model for Learning Long-Horizon 3D CAD UI Interactions from Video + +**Summary:** The first attempt to model UI interactions for precision engineering tasks. Features 41K+ annotated video recordings of CAD operations with time horizons up to 20x longer than existing datasets. Proposes VideoCADFormer for learning CAD interactions directly from video. + +**Key Contributions:** +- Large-scale synthetic dataset for CAD UI interactions +- VQA benchmark for evaluating spatial reasoning and video understanding +- Reveals challenges in precise action grounding and long-horizon dependencies + +**Poster:** https://neurips.cc/virtual/2025/poster/121820 + +--- + +## 16. Look Before You Leap: A GUI-Critic-R1 Model for Pre-Operative Error Diagnosis + +**Summary:** Introduces a pre-operative critic mechanism that provides feedback before action execution by reasoning about potential outcomes. Proposes Suggestion-aware Group Relative Policy Optimization (S-GRPO) for building the GUI-Critic-R1 model with fully automated data generation. + +**Key Results:** +- Significant advantages in critic accuracy compared to current MLLMs +- Improved success rates and operational efficiency on GUI automation benchmarks +- Works across both mobile and web domains + +**Poster:** https://neurips.cc/virtual/2025/poster/115566 + +--- + +## 17. Grounded Reinforcement Learning for Visual Reasoning (ViGoRL) + +**Summary:** A vision-language model trained with RL to explicitly anchor each reasoning step to specific visual coordinates. Introduces multi-turn RL framework enabling dynamic zooming into predicted coordinates during reasoning. 
+ +**Key Results:** +- 86.4% on V*Bench for visual search +- Outperforms supervised fine-tuning and conventional RL across spatial reasoning, visual search, and web-based grounding +- Grounding amplifies region exploration, subgoal setting, and visual verification + +**Poster:** https://neurips.cc/virtual/2025/poster/120218 + +--- + +## 18. GUI-Actor: Coordinate-Free Visual Grounding for GUI Agents + +**Summary:** A VLM-based method for coordinate-free GUI grounding using an attention-based action head. Enables proposing one or more action regions in a single forward pass with a grounding verifier for selection. + +**Key Results:** +- GUI-Actor-7B achieves 44.6 on ScreenSpot-Pro with Qwen2.5-VL, outperforming UI-TARS-72B (38.1) +- Improved generalization to unseen resolutions and layouts +- Fine-tuning only ~100M parameters achieves SOTA performance + +**Poster:** https://neurips.cc/virtual/2025/poster/119841 + +--- + +## 19. GUI-G1: Understanding R1-Zero-Like Training for Visual Grounding in GUI Agents + +**Summary:** Extensive analysis of the R1-Zero paradigm (online RL + chain-of-thought reasoning) for GUI grounding. Identifies issues: longer reasoning chains lead to worse performance, reward hacking via box size exploitation, and overfitting easy examples. + +**Solutions Proposed:** +- Fast Thinking Template for direct answer generation +- Box size constraint in reward function +- Difficulty-aware scaling in RL objective + +**Key Results:** +- GUI-G1-3B achieves 90.3% on ScreenSpot and 37.1% on ScreenSpot-Pro +- Outperforms larger UI-TARS-7B with only 3B parameters + +**Poster:** https://neurips.cc/virtual/2025/poster/120227 + +--- + +## 20. GUI-Reflection: Empowering Multimodal GUI Models with Self-Reflection Behavior + +**Summary:** Framework integrating self-reflection and error correction into end-to-end multimodal GUI models through GUI-specific pre-training, offline SFT, and online reflection tuning. Enables self-reflection emergence with fully automated data generation. + +**Key Contributions:** +- Scalable pipelines for automatic reflection/correction data from successful trajectories +- GUI-Reflection Task Suite for reflection-oriented abilities +- Diverse environment for online training on mobile devices +- Iterative online reflection tuning algorithm + +**Poster:** https://neurips.cc/virtual/2025/poster/115826 + +--- + +## 21. InfantAgent-Next: A Multimodal Generalist Agent for Automated Computer Interaction + +**Summary:** A generalist agent capable of multimodal computer interaction (text, images, audio, video). Integrates tool-based and pure vision agents within highly modular architecture, enabling collaborative step-by-step task solving. + +**Key Results:** +- 7.27 accuracy gain over Claude-Computer-Use on OSWorld +- Evaluated on pure vision benchmarks (OSWorld), general benchmarks (GAIA), and tool-intensive benchmarks (SWE-Bench) +- Demonstrates value of modular, collaborative agent architecture + +**Poster:** https://neurips.cc/virtual/2025/poster/118379 + +--- + +## 22. AdvEDM: Fine-grained Adversarial Attack against VLM-based Embodied Agents + +**Summary:** A fine-grained adversarial attack framework that modifies VLM perception of only key objects while preserving semantics of remaining regions. Unlike broad semantic disruption, this targeted approach reduces conflicts with task context, making VLMs output valid but incorrect decisions that affect agent actions in the physical world. 
+ +**Key Contributions:** +- AdvEDM-R: removes semantics of specific objects from images +- AdvEDM-A: adds semantics of new objects into images +- Demonstrates fine-grained control with excellent attack performance in embodied decision-making tasks + +**Poster:** https://neurips.cc/virtual/2025/poster/116436 + +--- + +## 23. BLINK-Twice: A Reasoning Benchmark on Visual Perception + +**Summary:** A vision-centric reasoning benchmark grounded in challenging perceptual tasks. Unlike prior benchmarks, it moves beyond shallow perception ("see") to require fine-grained observation and analytical reasoning ("observe"). Features natural adversarial image pairs and annotated reasoning chains for process evaluation. + +**Key Findings:** +- Tests 20 leading MLLMs including 12 foundation models and 8 reasoning-enhanced models +- Existing reasoning strategies (chain-of-thought, self-criticism) result in unstable and redundant reasoning +- Repeated image observation improves performance across models +- Active visual interaction (as in o3) highlights need for new vision reasoning paradigm + +**Poster:** https://neurips.cc/virtual/2025/poster/121522 + +--- + +## 24. BadVLA: Backdoor Attacks on Vision-Language-Action Models + +**Summary:** First systematic investigation of backdoor vulnerabilities in VLA models. Proposes Objective-Decoupled Optimization with two stages: explicit feature-space separation to isolate trigger representations, and conditional control deviations activated only by triggers. + +**Key Findings:** +- Consistently achieves near-100% attack success rates with minimal impact on clean task accuracy +- Robust against common input perturbations, task transfers, and model fine-tuning +- Exposes critical security vulnerabilities in current VLA deployments under Training-as-a-Service paradigm + +**Poster:** https://neurips.cc/virtual/2025/poster/115803 + +--- + +## 25. Benchmarking Egocentric Multimodal Goal Inference for Assistive Wearable Agents + +**Summary:** Benchmark for proactively inferring user goals from multimodal contextual observations for wearable assistant agents (smart glasses). Dataset comprises ~30 hours from 363 participants across 3,482 recordings with visual, audio, digital, and longitudinal context. + +**Key Findings:** +- Humans achieve 93% MCQ accuracy; best VLM reaches ~84% +- For open-ended generation, best models produce relevant goals only ~57% of the time +- Smaller models (suited for wearables) achieve ~49% accuracy +- Models benefit from relevant modalities but struggle with noisy ones + +**Poster:** https://neurips.cc/virtual/2025/poster/121655 + +--- + +## 26. GAM-Agent: Game-Theoretic Multi-Agent Framework for Visual Reasoning + +**Summary:** A game-theoretic multi-agent framework formulating reasoning as a non-zero-sum game between base agents (visual perception specialists) and a critical agent (logic/fact verification). Features uncertainty-aware controller for dynamic agent collaboration with multi-round debates. + +**Key Results:** +- Boosts small-to-mid scale models (Qwen2.5-VL-7B, InternVL3-14B) by 5-6% +- Enhances strong models like GPT-4o by 2-3% +- Modular, scalable, and generalizable framework + +**Poster:** https://neurips.cc/virtual/2025/poster/119144 + +--- + +## 27. GRIT: Teaching MLLMs to Think with Images + +**Summary:** Introduces Grounded Reasoning with Images and Textsβ€”a method for training MLLMs to generate reasoning chains interleaving natural language with explicit bounding box coordinates. 
Uses GRPO-GR reinforcement learning with rewards focused on answer accuracy and grounding format. + +**Key Contributions:** +- Exceptional data efficiency: requires as few as 20 image-question-answer triplets +- Successfully unifies reasoning and grounding abilities +- Eliminates need for reasoning chain annotations or explicit bounding box labels + +**Poster:** https://neurips.cc/virtual/2025/poster/118020 + +--- + +## 28. Safe RLHF-V: Safe Reinforcement Learning from Multi-modal Human Feedback + +**Summary:** First multimodal safety alignment framework. Introduces BeaverTails-V (first dataset with dual preference annotations for helpfulness and safety), and Beaver-Guard-V (multi-level guardrail system defending against unsafe queries and adversarial attacks). + +**Key Results:** +- Guard model improves precursor model's safety by average of 40.9% over five filtering rounds +- Safe RLHF-V enhances model safety by 34.2% and helpfulness by 34.3% +- First exploration of multi-modal safety alignment within constrained optimization + +**Poster:** https://neurips.cc/virtual/2025/poster/118304 + +--- + +## 29. Dropout Decoding: Uncertainty-Guided Token Dropout for LVLM Reliability + +**Summary:** An inference-time approach that quantifies visual token uncertainty and selectively masks uncertain tokens. Decomposes uncertainty into aleatoric and epistemic components, focusing on epistemic uncertainty for perception-related errors. + +**Key Results:** +- Significantly reduces object hallucinations +- Enhances reliability and quality of LVLM outputs across diverse visual contexts +- Validated on CHAIR, THRONE, and MMBench benchmarks + +**Poster:** https://neurips.cc/virtual/2025/poster/118572 + +--- + +## 30. FOCUS: Unified Vision-Language Modeling for Interactive Editing + +**Summary:** A unified LVLM integrating segmentation-aware perception and controllable object-centric generation. Uses dual-branch visual encoder for global semantic context and fine-grained spatial details, with MoVQGAN-based visual tokenizer for discrete visual tokens. + +**Key Contributions:** +- Progressive multi-stage training pipeline +- Segmentation masks jointly optimized as spatial condition prompts +- Bridges segmentation-aware perception with fine-grained visual synthesis + +**Poster:** https://neurips.cc/virtual/2025/poster/119062 + +--- + +## 31. Fine-Grained Preference Optimization for Spatial Reasoning (SpatialReasoner-R1) + +**Summary:** Introduces Multi-Model Monte Carlo Tree Search (M3CTS) for generating diverse Long Chain-of-Thought reasoning trajectories. Proposes fine-grained Direct Preference Optimization (fDPO) with segment-specific preference granularity guided by spatial reward mechanism. + +**Key Results:** +- fDPO achieves 4.1% and 9.0% gains over standard DPO on spatial quality and quantity tasks +- SpatialReasoner-R1 sets new SOTA on SpatialRGPT-Bench, outperforming strongest baseline by 9.8% +- Maintains competitive performance on general vision-language tasks + +**Poster:** https://neurips.cc/virtual/2025/poster/118573 + +--- + +## 32. Reason-RFT: Reinforcement Fine-Tuning for Visual Reasoning + +**Summary:** A two-stage reinforcement fine-tuning framework: SFT with curated Chain-of-Thought data activates reasoning potential, followed by RL based on Group Relative Policy Optimization (GRPO) for domain shift adaptability. 
+ +**Key Advantages:** +- State-of-the-art results outperforming both open-source and proprietary models +- Robust performance under domain shifts across various tasks +- Excellent data efficiency in few-shot learning scenarios + +**Poster:** https://neurips.cc/virtual/2025/poster/118345 + +--- + +## 33. Safe + Safe = Unsafe? Exploiting Safe Images to Jailbreak LVLMs + +**Summary:** Reveals that safe images can be exploited for jailbreaking when combined with additional safe images and prompts, exploiting LVLMs' universal reasoning capabilities and safety snowball effect. Proposes Safety Snowball Agent (SSA) framework. + +**Key Findings:** +- SSA can use nearly any image to induce LVLMs to produce unsafe content +- Achieves high jailbreak success rates against latest LVLMs +- Exploits inherent LVLM properties rather than alignment flaws + +**Poster:** https://neurips.cc/virtual/2025/loc/san-diego/poster/116422 + +--- + +## 34. MIP against Agent: Malicious Image Patches Hijacking Multimodal OS Agents + +**Summary:** Uncovers novel attack vector: Malicious Image Patches (MIPs)β€”adversarially perturbed screen regions that induce OS agents to perform harmful actions. MIPs can be embedded in wallpapers or shared on social media to exfiltrate sensitive data. + +**Key Findings:** +- MIPs generalize across user prompts and screen configurations +- Can hijack multiple OS agents during execution of benign instructions +- Exposes critical security vulnerabilities requiring attention before widespread deployment + +**Poster:** https://neurips.cc/virtual/2025/loc/san-diego/poster/117813 + +--- + +## 35. CogVLA: Cognition-Aligned Vision-Language-Action Models + +**Summary:** A framework leveraging instruction-driven routing and sparsification for VLA efficiency. Features 3-stage progressive architecture inspired by human multimodal coordination: Encoder-FiLM Aggregation Routing, LLM-FiLM Pruning Routing, and V-L-A Coupled Attention. + +**Key Results:** +- 97.4% success rate on LIBERO benchmark, 70.0% on real-world robotic tasks +- Reduces training costs by 2.5x and inference latency by 2.8x compared to OpenVLA +- Achieves state-of-the-art performance + +**Poster:** https://neurips.cc/virtual/2025/poster/119023 + +--- + +## 36. Succeed or Learn Slowly (SoLS): Sample Efficient RL for Mobile App Control + +**Summary:** Novel off-policy RL algorithm applying direct policy updates for positive samples and conservative, regularized updates for negative ones. Augmented with Successful Transition Replay (STR) for prioritizing successful interactions. + +**Key Results:** +- At least 17% relative increase over existing methods on AndroidWorld benchmark +- Substantially fewer computational resources than GPT-4o-based methods +- 5-60x faster inference + +**Poster:** https://neurips.cc/virtual/2025/poster/119910 + +--- + +## 37. TAI3: Testing Agent Integrity in Interpreting User Intent + +**Summary:** An API-centric stress testing framework that uncovers intent integrity violations in LLM agents. Uses semantic partitioning to organize tasks into meaningful categories, with targeted mutations to expose subtle agent errors while preserving user intent. + +**Key Contributions:** +- Datatype-aware strategy memory for retrieving effective mutation patterns +- Lightweight predictor for ranking mutations by error likelihood +- Generalizes to stronger target models using smaller LLMs for test generation + +**Poster:** https://neurips.cc/virtual/2025/poster/118952 + +--- + +## 38. 
ThinkAct: Vision-Language-Action Reasoning via Reinforced Visual Latent Planning + +**Summary:** A dual-system framework bridging high-level reasoning with low-level action execution. Trains multimodal LLM to generate embodied reasoning plans guided by action-aligned visual rewards, compressed into visual plan latents for downstream action execution. + +**Key Capabilities:** +- Few-shot adaptation +- Long-horizon planning +- Self-correction behaviors in complex embodied AI tasks + +**Poster:** https://neurips.cc/virtual/2025/poster/119747 + +--- + +## 39. Visualization-of-Thought Attack (VoTA) against VLMs + +**Summary:** Automated attack framework that constructs chains of images with risky visual thoughts to challenge VLMs. Exploits the conflict between logical processing and safety protocols, leading to unsafe content generation. + +**Key Results:** +- Improves average attack success rate by 26.71% (from 63.70% to 90.41%) +- Tested on 9 open-source and 6 commercial VLMs +- Outperforms state-of-the-art methods + +**Poster:** https://neurips.cc/virtual/2025/poster/119873 + +--- + +## 40. Open CaptchaWorld: Benchmarking MLLM Agents on CAPTCHA Puzzles + +**Summary:** First web-based benchmark evaluating MLLM agents on diverse CAPTCHA puzzles. Spans 20 modern CAPTCHA types (225 total) with novel metric: CAPTCHA Reasoning Depth quantifying cognitive and motor steps required. + +**Key Findings:** +- Humans achieve 93.3% success rate +- State-of-the-art agents achieve at most 40.0% (Browser-Use OpenAI-o3) +- Highlights significant gap between human and agent capabilities + +**Poster:** https://neurips.cc/virtual/2025/poster/121537 + +--- + +## 41. Pixel Reasoner: Pixel-Space Reasoning with Curiosity-Driven RL + +**Summary:** Introduces pixel-space reasoning framework where VLMs use visual operations (zoom-in, select-frame) to directly inspect and infer from visual evidence. Two-phase training: instruction tuning on synthesized traces, then RL with curiosity-driven rewards. + +**Key Results:** +- 84% on V*Bench, 74% on TallyQA-Complex, 84% on InfographicsVQA +- Highest accuracy achieved by any open-source 7B model +- Enables proactive information gathering from complex visual inputs + +**Poster:** https://neurips.cc/virtual/2025/poster/117667 + +--- + +## 42. BTL-UI: Blink-Think-Link Reasoning Model for GUI Agent + +**Summary:** Brain-inspired framework decomposing interactions into three biologically plausible phases: Blink (rapid detection via saccadic-like attention), Think (higher-level reasoning/planning), and Link (executable command generation for motor control). + +**Key Innovations:** +- Automated annotation pipeline for blink data +- BTL Reward: first rule-based reward mechanism driven by both process and outcome +- Competitive performance on static GUI understanding and dynamic interaction tasks + +**Poster:** https://neurips.cc/virtual/2025/poster/119419 + +--- + +## 43. GUI Exploration Lab: Multi-Turn RL for Screen Navigation + +**Summary:** Simulation environment engine enabling flexible definition of screens, icons, and navigation graphs with full environment access for agent training/evaluation. Demonstrates progressive training approach from SFT to multi-turn RL. 
+ +**Key Findings:** +- Supervised fine-tuning enables memorization of fundamental knowledge +- Single-turn RL enhances generalization to unseen scenarios +- Multi-turn RL encourages exploration strategies through interactive trial and error + +**Poster:** https://neurips.cc/virtual/2025/loc/san-diego/poster/117497 + +--- + +## 44. GUI-Rise: Structured Reasoning and History Summarization for GUI Navigation + +**Summary:** Reasoning-enhanced framework integrating structured reasoning, action prediction, and history summarization. Uses Chain-of-Thought analyses combining progress estimation and decision reasoning, trained via SFT and GRPO with history-aware rewards. + +**Key Results:** +- State-of-the-art under identical training data conditions +- Particularly strong in out-of-domain scenarios +- Robust reasoning and generalization across diverse GUI navigation tasks + +**Poster:** https://neurips.cc/virtual/2025/poster/117425 + +--- + +## 45. UI-Genie: A Self-Improving Framework for MLLM-based Mobile GUI Agents + +**Summary:** Self-improving framework addressing trajectory verification and training data scalability. Features UI-Genie-RM (image-text interleaved reward model) and self-improvement pipeline with reward-guided exploration and outcome verification. + +**Key Contributions:** +- UI-Genie-RM-517k: first reward-specific dataset for GUI agents +- UI-Genie-Agent-16k: high-quality synthetic trajectories without manual annotation +- State-of-the-art across multiple GUI agent benchmarks through three generations of self-improvement + +**Poster:** https://neurips.cc/virtual/2025/poster/119990 + +--- + +## What We're Building + +At Cua, we're focused on the infrastructure layer for computer-use agents: cloud sandboxes for safe execution, SDKs for agent development, and tools that make it easier to build and deploy agents in production. + +If you're experimenting with any of the approaches in these papers, our [Cloud Sandboxes](https://cua.ai) provide isolated Linux, Windows, and macOS environments where you can test agent behavior without risk to real systems. + +--- + +**Start building:** [cua.ai](https://cua.ai) + +**Join the community:** [Discord](https://discord.gg/cua-ai) diff --git a/blog/sandboxed-python-execution.md b/blog/sandboxed-python-execution.md index e0eb8391..b45a0ab5 100644 --- a/blog/sandboxed-python-execution.md +++ b/blog/sandboxed-python-execution.md @@ -378,4 +378,4 @@ Happy coding (safely)! --- -_Want to dive deeper? Check out our [sandboxed functions examples](https://github.com/trycua/cua/blob/main/examples/sandboxed_functions_examples.py) and [virtual environment tests](https://github.com/trycua/cua/blob/main/tests/venv.py) on GitHub. Questions? Come chat with us on Discord!_ +_Want to dive deeper? Check out our [sandboxed functions examples](https://github.com/trycua/cua/blob/main/examples/sandboxed_functions_examples.py) and [virtual environment tests](https://github.com/trycua/cua/blob/main/tests/test_venv.py) on GitHub. Questions? 
Come chat with us on Discord!_ diff --git a/blog/training-computer-use-models-trajectories-1.md b/blog/training-computer-use-models-trajectories-1.md index 040eaea4..dcfdbb12 100644 --- a/blog/training-computer-use-models-trajectories-1.md +++ b/blog/training-computer-use-models-trajectories-1.md @@ -247,7 +247,7 @@ try: await computer.interface.right_click(300, 300) await computer.interface.double_click(400, 400) - await computer.interface.type("Hello, World!") + await computer.interface.type_text("Hello, World!") await computer.interface.press_key("enter") await computer.interface.set_clipboard("Test clipboard") @@ -306,6 +306,6 @@ Now that you know how to create and share trajectories, consider these advanced ### Resources -- [Computer-Use Interface GitHub](https://github.com/trycua/cua/tree/main/libs/computer) +- [Computer-Use Interface GitHub](https://github.com/trycua/cua/tree/main/libs/python/computer) - [Hugging Face Datasets Documentation](https://huggingface.co/docs/datasets) - [Example Dataset: ddupont/test-dataset](https://huggingface.co/datasets/ddupont/test-dataset) diff --git a/blog/ubuntu-docker-support.md b/blog/ubuntu-docker-support.md index 774d0438..b489a6e0 100644 --- a/blog/ubuntu-docker-support.md +++ b/blog/ubuntu-docker-support.md @@ -174,7 +174,7 @@ await computer.run() ## Links -- **Docker Provider Docs:** [https://cua.ai/docs/computers/docker](https://cua.ai/docs/computers/docker) +- **Docker Provider Docs:** [https://cua.ai/docs/computers/docker](https://cua.ai/docs/computer-sdk/computers#linux-on-docker) - **KasmVNC:** [https://github.com/kasmtech/KasmVNC](https://github.com/kasmtech/KasmVNC) - **Container Source:** [https://github.com/trycua/cua/tree/main/libs/kasm](https://github.com/trycua/cua/tree/main/libs/kasm) - **Computer SDK:** [https://cua.ai/docs/computer-sdk/computers](https://cua.ai/docs/computer-sdk/computers) diff --git a/blog/windows-sandbox.md b/blog/windows-sandbox.md index ef577611..d0f7f8f0 100644 --- a/blog/windows-sandbox.md +++ b/blog/windows-sandbox.md @@ -239,7 +239,7 @@ But for development, prototyping, and learning Windows RPA workflows, **Windows - [Windows Sandbox Documentation](https://learn.microsoft.com/en-us/windows/security/application-security/application-isolation/windows-sandbox/) - [Cua GitHub Repository](https://github.com/trycua/cua) -- [Agent UI Documentation](https://github.com/trycua/cua/tree/main/libs/agent) +- [Agent UI Documentation](https://github.com/trycua/cua/tree/main/libs/python/agent) - [Join our Discord Community](https://discord.gg/cua-ai) --- diff --git a/docs/content/docs/agent-sdk/agent-loops.mdx b/docs/content/docs/agent-sdk/agent-loops.mdx index 57765b18..0b759b3e 100644 --- a/docs/content/docs/agent-sdk/agent-loops.mdx +++ b/docs/content/docs/agent-sdk/agent-loops.mdx @@ -34,7 +34,7 @@ async def take_screenshot(): ) as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], max_trajectory_budget=5.0 ) @@ -89,7 +89,7 @@ Use the following environment variables to configure the agent and its access to ```bash # Computer instance (cloud) -export CUA_CONTAINER_NAME="your-container-name" +export CUA_SANDBOX_NAME="your-sandbox-name" export CUA_API_KEY="your-cua-api-key" # LLM API keys @@ -121,7 +121,7 @@ The output is an AsyncGenerator that yields response chunks. The `ComputerAgent` constructor provides a wide range of options for customizing agent behavior, tool integration, callbacks, resource management, and more. 
- `model` (`str`): Default: **required** - The LLM or agent model to use. Determines which agent loop is selected unless `custom_loop` is provided. (e.g., "claude-3-5-sonnet-20241022", "computer-use-preview", "omni+vertex_ai/gemini-pro") + The LLM or agent model to use. Determines which agent loop is selected unless `custom_loop` is provided. (e.g., "claude-sonnet-4-5-20250929", "computer-use-preview", "omni+vertex_ai/gemini-pro") - `tools` (`List[Any]`): List of tools the agent can use (e.g., `Computer`, sandboxed Python functions, etc.). - `custom_loop` (`Callable`): @@ -159,7 +159,7 @@ from computer import Computer from agent.callbacks import ImageRetentionCallback agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[Computer(...)], only_n_most_recent_images=3, callbacks=[ImageRetentionCallback(only_n_most_recent_images=3)], diff --git a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx index 4a76dc95..636eda3b 100644 --- a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx +++ b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx @@ -13,7 +13,7 @@ Optimize agent costs with budget management and image retention callbacks. from agent.callbacks import BudgetManagerCallback agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[ BudgetManagerCallback( @@ -30,7 +30,7 @@ agent = ComputerAgent( ```python # Simple budget limit agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", max_trajectory_budget=5.0 # $5 limit ) ``` @@ -40,7 +40,7 @@ agent = ComputerAgent( ```python # Advanced budget configuration agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", max_trajectory_budget={ "max_budget": 10.0, "raise_error": True, # Raise error when exceeded @@ -55,7 +55,7 @@ agent = ComputerAgent( from agent.callbacks import ImageRetentionCallback agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[ ImageRetentionCallback(only_n_most_recent_images=3) @@ -67,7 +67,7 @@ agent = ComputerAgent( ```python agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], only_n_most_recent_images=3 # Auto-adds ImageRetentionCallback ) @@ -77,7 +77,7 @@ agent = ComputerAgent( ```python agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], max_trajectory_budget=5.0, # Budget limit only_n_most_recent_images=3, # Image retention diff --git a/docs/content/docs/agent-sdk/callbacks/index.mdx b/docs/content/docs/agent-sdk/callbacks/index.mdx index 71b63a2e..a02cea9e 100644 --- a/docs/content/docs/agent-sdk/callbacks/index.mdx +++ b/docs/content/docs/agent-sdk/callbacks/index.mdx @@ -21,7 +21,7 @@ from agent.callbacks import ( ) agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[ ImageRetentionCallback(only_n_most_recent_images=3), diff --git a/docs/content/docs/agent-sdk/callbacks/logging.mdx b/docs/content/docs/agent-sdk/callbacks/logging.mdx index 2ed3dda8..62e3d9b2 100644 --- 
a/docs/content/docs/agent-sdk/callbacks/logging.mdx +++ b/docs/content/docs/agent-sdk/callbacks/logging.mdx @@ -14,7 +14,7 @@ from agent.callbacks import LoggingCallback import logging agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[ LoggingCallback( @@ -29,7 +29,7 @@ agent = ComputerAgent( ```python agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], verbosity=logging.INFO # Auto-adds LoggingCallback ) @@ -72,7 +72,7 @@ class CustomLogger(AsyncCallbackHandler): # Use custom logger agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[CustomLogger("my_agent")] ) diff --git a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx index 7bffaa8f..ade285f3 100644 --- a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx +++ b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx @@ -13,7 +13,7 @@ The TrajectorySaverCallback records complete agent conversations including messa from agent.callbacks import TrajectorySaverCallback agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], callbacks=[ TrajectorySaverCallback( @@ -28,7 +28,7 @@ agent = ComputerAgent( ```python agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", trajectory_dir="trajectories", # Auto-save trajectories tools=[computer] ) diff --git a/docs/content/docs/agent-sdk/chat-history.mdx b/docs/content/docs/agent-sdk/chat-history.mdx index e7041c3b..e6740fc3 100644 --- a/docs/content/docs/agent-sdk/chat-history.mdx +++ b/docs/content/docs/agent-sdk/chat-history.mdx @@ -83,7 +83,7 @@ For long conversations, consider using the `only_n_most_recent_images` parameter ```python agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], only_n_most_recent_images=3 ) diff --git a/docs/content/docs/agent-sdk/custom-tools.mdx b/docs/content/docs/agent-sdk/custom-tools.mdx index 00847cf6..63d878cb 100644 --- a/docs/content/docs/agent-sdk/custom-tools.mdx +++ b/docs/content/docs/agent-sdk/custom-tools.mdx @@ -16,7 +16,7 @@ def calculate(a: int, b: int) -> int: # Use with agent agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer, calculate] ) ``` @@ -43,7 +43,7 @@ from computer import Computer computer = Computer(...) 
agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer, read_file], ) ``` diff --git a/docs/content/docs/agent-sdk/customizing-computeragent.mdx b/docs/content/docs/agent-sdk/customizing-computeragent.mdx index e7d3c030..158495e0 100644 --- a/docs/content/docs/agent-sdk/customizing-computeragent.mdx +++ b/docs/content/docs/agent-sdk/customizing-computeragent.mdx @@ -1,5 +1,5 @@ --- -title: Customizing Your ComputerAgent +title: Customize ComputerAgent --- @@ -74,7 +74,7 @@ Callbacks provide lifecycle hooks to preprocess messages, postprocess outputs, r from agent.callbacks import ImageRetentionCallback, TrajectorySaverCallback, BudgetManagerCallback agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], callbacks=[ ImageRetentionCallback(only_n_most_recent_images=3), diff --git a/docs/content/docs/agent-sdk/integrations/meta.json b/docs/content/docs/agent-sdk/integrations/meta.json index 7b7ebb81..39da855e 100644 --- a/docs/content/docs/agent-sdk/integrations/meta.json +++ b/docs/content/docs/agent-sdk/integrations/meta.json @@ -1,4 +1,4 @@ { "title": "Integrations", - "pages": ["hud"] + "pages": ["hud", "observability"] } diff --git a/docs/content/docs/agent-sdk/integrations/observability.mdx b/docs/content/docs/agent-sdk/integrations/observability.mdx new file mode 100644 index 00000000..44db0ea4 --- /dev/null +++ b/docs/content/docs/agent-sdk/integrations/observability.mdx @@ -0,0 +1,66 @@ +--- +title: Observability +description: Trace CUA execution steps and sessions +--- + +## Observability + +CUA has a native integration with [Laminar](https://laminar.sh/) – an open-source platform for tracing, evals, and labeling of autonomous AI agents. Read more about Laminar in the [Laminar docs](https://docs.lmnr.ai/). + +## Setup + +Register on [Laminar Cloud](https://laminar.sh/) or spin up a [local instance](https://github.com/lmnr-ai/lmnr) and get the key from your project settings. Set the `LMNR_PROJECT_API_KEY` environment variable to your key. + +```bash +pip install lmnr[all] +export LMNR_PROJECT_API_KEY=your-key +``` + +## Usage + +Then, initialize Laminar at the entry point of your application, register the Laminar LiteLLM callback, and all steps of CUA will be automatically traced. + +```python +import asyncio +import os + +import litellm + +from agent import ComputerAgent +from computer import Computer +from lmnr import Laminar, LaminarLiteLLMCallback # [!code highlight] + +Laminar.initialize() # [!code highlight] +litellm.callbacks.append(LaminarLiteLLMCallback()) # [!code highlight] + +computer = Computer( + os_type="linux", + provider_type="cloud", + name=os.getenv("CUA_CONTAINER_NAME"), + api_key=os.getenv("CUA_API_KEY"), +) + +agent = ComputerAgent( + model="openai/computer-use-preview", + tools=[computer], +) + +async def main(): + async for step in agent.run("Create a new file called 'test.txt' in the current directory"): + print(step["output"]) + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Viewing traces + +You can view traces in the Laminar UI by going to the traces tab in your project. When you select a trace, +you will see all the agent execution steps, including computer actions, LLM calls, and screenshots. + +For each step, you will see the LLM call and the computer action. The computer actions are highlighted in the timeline in yellow. + +Example trace in Laminar showing the litellm.response span and its output.
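Laminar tracing and the agent SDK's local trajectory saving (covered in the trajectories callback docs) are independent features, so they can be combined in one script. The following is a minimal sketch under that assumption, reusing only APIs shown in this documentation (`Laminar.initialize`, `LaminarLiteLLMCallback`, `trajectory_dir`); the sandbox name, environment variables, and task text are placeholders.

```python
import asyncio
import os

import litellm

from agent import ComputerAgent
from computer import Computer
from lmnr import Laminar, LaminarLiteLLMCallback

# Send LiteLLM spans to Laminar; reads LMNR_PROJECT_API_KEY from the environment
Laminar.initialize()
litellm.callbacks.append(LaminarLiteLLMCallback())

computer = Computer(
    os_type="linux",
    provider_type="cloud",
    name=os.getenv("CUA_CONTAINER_NAME"),  # placeholder: your Cloud Sandbox name
    api_key=os.getenv("CUA_API_KEY"),
)

agent = ComputerAgent(
    model="openai/computer-use-preview",
    tools=[computer],
    trajectory_dir="trajectories",  # also save each run locally for later replay
)

async def main():
    # Each step is traced in Laminar and appended to the local trajectory
    async for step in agent.run("Open the Downloads folder and list its contents"):
        print(step["output"])

if __name__ == "__main__":
    asyncio.run(main())
```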
diff --git a/docs/content/docs/agent-sdk/meta.json b/docs/content/docs/agent-sdk/meta.json index b2b75fa3..0a733f28 100644 --- a/docs/content/docs/agent-sdk/meta.json +++ b/docs/content/docs/agent-sdk/meta.json @@ -10,11 +10,10 @@ "customizing-computeragent", "callbacks", "custom-tools", - "custom-computer-handlers", "prompt-caching", "usage-tracking", + "telemetry", "benchmarks", - "migration-guide", "integrations" ] } diff --git a/docs/content/docs/agent-sdk/migration-guide.mdx b/docs/content/docs/agent-sdk/migration-guide.mdx index ec75ab7a..fd67b282 100644 --- a/docs/content/docs/agent-sdk/migration-guide.mdx +++ b/docs/content/docs/agent-sdk/migration-guide.mdx @@ -7,7 +7,7 @@ This guide lists **breaking changes** when migrating from the original `Computer ## Breaking Changes - **Initialization:** - - `ComputerAgent` (v0.4.x) uses `model` as a string (e.g. "anthropic/claude-3-5-sonnet-20241022") instead of `LLM` and `AgentLoop` objects. + - `ComputerAgent` (v0.4.x) uses `model` as a string (e.g. "anthropic/claude-sonnet-4-5-20250929") instead of `LLM` and `AgentLoop` objects. - `tools` is a list (can include multiple computers and decorated functions). - `callbacks` are now first-class for extensibility (image retention, budget, trajectory, logging, etc). - **No explicit `loop` parameter:** @@ -39,7 +39,7 @@ async with Computer() as computer: ```python async with Computer() as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer] ) messages = [{"role": "user", "content": "Take a screenshot"}] diff --git a/docs/content/docs/agent-sdk/prompt-caching.mdx b/docs/content/docs/agent-sdk/prompt-caching.mdx index cdcf7db5..633830ec 100644 --- a/docs/content/docs/agent-sdk/prompt-caching.mdx +++ b/docs/content/docs/agent-sdk/prompt-caching.mdx @@ -38,7 +38,7 @@ With the OpenAI provider, prompt caching is handled automatically for prompts of ```python from agent import ComputerAgent agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", use_prompt_caching=True, ) ``` diff --git a/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx b/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx index 4e389365..4ede3a26 100644 --- a/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/composed-agents.mdx @@ -32,7 +32,7 @@ Any vision-enabled LiteLLM-compatible model can be used as the planning componen - Any All‑in‑one CUA (planning-capable). See [All‑in‑one CUAs](./computer-use-agents). - Any VLM via LiteLLM providers: `anthropic/*`, `openai/*`, `openrouter/*`, `gemini/*`, `vertex_ai/*`, `huggingface-local/*`, `mlx/*`, etc. 
- Examples: - - **Anthropic**: `anthropic/claude-3-5-sonnet-20241022`, `anthropic/claude-opus-4-1-20250805` + - **Anthropic**: `anthropic/claude-sonnet-4-5-20250929`, `anthropic/claude-opus-4-1-20250805` - **OpenAI**: `openai/gpt-5`, `openai/gpt-o3`, `openai/gpt-4o` - **Google**: `gemini/gemini-1.5-pro`, `vertex_ai/gemini-pro-vision` - **Local models**: Any Hugging Face vision-language model @@ -41,7 +41,7 @@ Any vision-enabled LiteLLM-compatible model can be used as the planning componen ### GTA1 + GPT-5 -Use Google's Gemini for planning with specialized grounding: +Use OpenAI's GPT-5 for planning with specialized grounding: ```python agent = ComputerAgent( @@ -59,7 +59,7 @@ Combine state-of-the-art grounding with powerful reasoning: ```python agent = ComputerAgent( - "huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-3-5-sonnet-20241022", + "huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929", tools=[computer] ) @@ -113,7 +113,7 @@ async for _ in agent.run("Close the settings window, then open the Downloads fol Composed agents support both capabilities: ```python -agent = ComputerAgent("huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-3-5-sonnet-20241022") +agent = ComputerAgent("huggingface-local/HelloKKMe/GTA1-7B+anthropic/claude-sonnet-4-5-20250929") # Full computer-use agent capabilities async for _ in agent.run("Complete this online form"): diff --git a/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx b/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx index 9621e520..49a92aa7 100644 --- a/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/computer-use-agents.mdx @@ -29,10 +29,9 @@ Claude models with computer-use capabilities: - Claude 4.1: `claude-opus-4-1-20250805` - Claude 4: `claude-opus-4-20250514`, `claude-sonnet-4-20250514` - Claude 3.7: `claude-3-7-sonnet-20250219` -- Claude 3.5: `claude-3-5-sonnet-20241022` ```python -agent = ComputerAgent("claude-3-5-sonnet-20241022", tools=[computer]) +agent = ComputerAgent("claude-sonnet-4-5-20250929", tools=[computer]) async for _ in agent.run("Open Firefox and navigate to github.com"): pass ``` @@ -78,10 +77,10 @@ async for _ in agent.run("Open Firefox and navigate to github.com"): Qwen3 VL family: -- `openrouter/qwen/qwen3-vl-235b-a22b-instruct` +- `cua/qwen/qwen3-vl-235b` (via CUA VLM Router - recommended) ```python -agent = ComputerAgent("openrouter/qwen/qwen3-vl-235b-a22b-instruct", tools=[computer]) +agent = ComputerAgent("cua/qwen/qwen3-vl-235b", tools=[computer]) async for _ in agent.run("Open Firefox and navigate to github.com"): pass ``` diff --git a/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx b/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx index 1f12de9a..9c92ac26 100644 --- a/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/grounding-models.mdx @@ -11,10 +11,10 @@ All models that support `ComputerAgent.run()` also support `ComputerAgent.predic ### Anthropic CUAs +- Claude 4.5: `claude-sonnet-4-5-20250929` - Claude 4.1: `claude-opus-4-1-20250805` - Claude 4: `claude-opus-4-20250514`, `claude-sonnet-4-20250514` - Claude 3.7: `claude-3-7-sonnet-20250219` -- Claude 3.5: `claude-3-5-sonnet-20241022` ### OpenAI CUA Preview @@ -61,7 +61,7 @@ Moondream3 is a powerful small model that can perform UI grounding and click pre ```python # Using any grounding model for click prediction -agent = 
ComputerAgent("claude-3-5-sonnet-20241022", tools=[computer]) +agent = ComputerAgent("claude-sonnet-4-5-20250929", tools=[computer]) # Predict coordinates for specific elements login_coords = agent.predict_click("find the login button") @@ -75,7 +75,7 @@ print(f"Menu icon: {menu_coords}") ```python # OmniParser is just for OCR, so it requires an LLM for predict_click -agent = ComputerAgent("omniparser+anthropic/claude-3-5-sonnet-20241022", tools=[computer]) +agent = ComputerAgent("omniparser+anthropic/claude-sonnet-4-5-20250929", tools=[computer]) # Predict click coordinates using composed agent coords = agent.predict_click("find the submit button") diff --git a/docs/content/docs/agent-sdk/supported-model-providers/cua-vlm-router.mdx b/docs/content/docs/agent-sdk/supported-model-providers/cua-vlm-router.mdx new file mode 100644 index 00000000..930360a2 --- /dev/null +++ b/docs/content/docs/agent-sdk/supported-model-providers/cua-vlm-router.mdx @@ -0,0 +1,441 @@ +--- +title: CUA VLM Router +description: Intelligent vision-language model routing with cost optimization and unified access +--- + +# CUA VLM Router + +The **CUA VLM Router** is an intelligent inference API that provides unified access to multiple vision-language model providers through a single API key. It offers cost optimization and detailed observability for production AI applications. + +## Overview + +Instead of managing multiple API keys and provider-specific code, CUA VLM Router acts as a smart cloud gateway that: + +- **Unifies access** to multiple model providers +- **Optimizes costs** through intelligent routing and provider selection +- **Tracks usage** and costs with detailed metadata +- **Provides observability** with routing decisions and attempt logs +- **Managed infrastructure** - no need to manage provider API keys yourself + +## Quick Start + +### 1. Get Your API Key + +Sign up at [cua.ai](https://cua.ai/signin) and get your CUA API key from the dashboard. + +### 2. Set Environment Variable + +```bash +export CUA_API_KEY="sk_cua-api01_..." +``` + +### 3. Use with Agent SDK + +```python +from agent import ComputerAgent +from computer import Computer + +computer = Computer(os_type="linux", provider_type="docker") + +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + max_trajectory_budget=5.0 +) + +messages = [{"role": "user", "content": "Take a screenshot and tell me what's on screen"}] + +async for result in agent.run(messages): + for item in result["output"]: + if item["type"] == "message": + print(item["content"][0]["text"]) +``` + +## Available Models + +The CUA VLM Router currently supports these models: + +| Model ID | Provider | Description | Best For | +| --------------------------------- | --------- | ----------------- | --------------------------------------- | +| `cua/anthropic/claude-sonnet-4.5` | Anthropic | Claude Sonnet 4.5 | General-purpose tasks, recommended | +| `cua/anthropic/claude-opus-4.5` | Anthropic | Claude Opus 4.5 | Enhanced agentic and computer-use tasks | +| `cua/anthropic/claude-haiku-4.5` | Anthropic | Claude Haiku 4.5 | Fast responses, cost-effective | +| `cua/qwen/qwen3-vl-235b` | Qwen | Qwen3 VL 235B | Large-scale vision-language tasks | + +## How It Works + +### Intelligent Routing + +When you make a request to CUA VLM Router: + +1. **Model Resolution**: Your model ID (e.g., `cua/anthropic/claude-sonnet-4.5`) is resolved to the appropriate provider +2. **Provider Selection**: CUA routes your request to the appropriate model provider +3. 
**Response**: You receive an OpenAI-compatible response with metadata + +## API Reference + +### Base URL + +``` +https://inference.cua.ai/v1 +``` + +### Authentication + +All requests require an API key in the Authorization header: + +```bash +Authorization: Bearer sk_cua-api01_... +``` + +### Endpoints + +#### List Available Models + +```bash +GET /v1/models +``` + +**Response:** + +```json +{ + "data": [ + { + "id": "anthropic/claude-sonnet-4.5", + "name": "Claude Sonnet 4.5", + "object": "model", + "owned_by": "cua" + } + ], + "object": "list" +} +``` + +#### Chat Completions + +```bash +POST /v1/chat/completions +Content-Type: application/json +``` + +**Request:** + +```json +{ + "model": "anthropic/claude-sonnet-4.5", + "messages": [{ "role": "user", "content": "Hello!" }], + "max_tokens": 100, + "temperature": 0.7, + "stream": false +} +``` + +**Response:** + +```json +{ + "id": "gen_...", + "object": "chat.completion", + "created": 1763554838, + "model": "anthropic/claude-sonnet-4.5", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I help you today?" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 12, + "total_tokens": 22, + "cost": 0.01, + "is_byok": true + } +} +``` + +#### Streaming + +Set `"stream": true` to receive server-sent events: + +```bash +curl -X POST https://inference.cua.ai/v1/chat/completions \ + -H "Authorization: Bearer sk_cua-api01_..." \ + -H "Content-Type: application/json" \ + -d '{ + "model": "anthropic/claude-sonnet-4.5", + "messages": [{"role": "user", "content": "Count to 5"}], + "stream": true + }' +``` + +**Response (SSE format):** + +``` +data: {"id":"gen_...","choices":[{"delta":{"content":"1"}}],"object":"chat.completion.chunk"} + +data: {"id":"gen_...","choices":[{"delta":{"content":"\n2"}}],"object":"chat.completion.chunk"} + +data: {"id":"gen_...","choices":[{"delta":{"content":"\n3\n4\n5"}}],"object":"chat.completion.chunk"} + +data: {"id":"gen_...","choices":[{"delta":{},"finish_reason":"stop"}],"usage":{...}} +``` + +#### Check Balance + +```bash +GET /v1/balance +``` + +**Response:** + +```json +{ + "balance": 211689.85, + "currency": "credits" +} +``` + +## Cost Tracking + +CUA VLM Router provides detailed cost information in every response: + +### Credit System + +Requests are billed in **credits**: + +- Credits are deducted from your CUA account balance +- Prices vary by model and usage +- CUA manages all provider API keys and infrastructure + +### Response Cost Fields + +```json +{ + "usage": { + "cost": 0.01, // CUA gateway cost in credits + "market_cost": 0.000065 // Actual upstream API cost + } +} +``` + +**Note:** CUA VLM Router is a fully managed cloud service. If you want to use your own provider API keys directly (BYOK), see the [Supported Model Providers](/agent-sdk/supported-model-providers/) page for direct provider access via the agent SDK. + +## Response Metadata + +CUA VLM Router includes metadata about routing decisions and costs in the response. This information helps with debugging and monitoring your application's model usage. + +## Configuration + +### Environment Variables + +```bash +# Required: Your CUA API key +export CUA_API_KEY="sk_cua-api01_..." 
+ +# Optional: Custom endpoint (defaults to https://inference.cua.ai/v1) +export CUA_BASE_URL="https://custom-endpoint.cua.ai/v1" +``` + +### Python SDK Configuration + +```python +from agent import ComputerAgent + +# Using environment variables (recommended) +agent = ComputerAgent(model="cua/anthropic/claude-sonnet-4.5") + +# Or explicit configuration +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + # CUA adapter automatically loads from CUA_API_KEY +) +``` + +## Benefits Over Direct Provider Access + +| Feature | CUA VLM Router | Direct Provider (BYOK) | +| -------------------------- | ---------------------------- | --------------------------------- | +| **Single API Key** | βœ… One key for all providers | ❌ Multiple keys to manage | +| **Managed Infrastructure** | βœ… No API key management | ❌ Manage multiple provider keys | +| **Usage Tracking** | βœ… Unified dashboard | ❌ Per-provider tracking | +| **Model Switching** | βœ… Change model string only | ❌ Change code + keys | +| **Setup Complexity** | βœ… One environment variable | ❌ Multiple environment variables | + +## Error Handling + +### Common Error Responses + +#### Invalid API Key + +```json +{ + "detail": "Insufficient credits. Current balance: 0.00 credits" +} +``` + +#### Missing Authorization + +```json +{ + "detail": "Missing Authorization: Bearer token" +} +``` + +#### Invalid Model + +```json +{ + "detail": "Invalid or unavailable model" +} +``` + +### Best Practices + +1. **Check balance periodically** using `/v1/balance` +2. **Handle rate limits** with exponential backoff +3. **Log generation IDs** for debugging +4. **Set up usage alerts** in your CUA dashboard + +## Examples + +### Basic Usage + +```python +from agent import ComputerAgent +from computer import Computer + +computer = Computer(os_type="linux", provider_type="docker") + +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer] +) + +messages = [{"role": "user", "content": "Open Firefox"}] + +async for result in agent.run(messages): + print(result) +``` + +### Direct API Call (curl) + +```bash +curl -X POST https://inference.cua.ai/v1/chat/completions \ + -H "Authorization: Bearer ${CUA_API_KEY}" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "anthropic/claude-sonnet-4.5", + "messages": [ + {"role": "user", "content": "Explain quantum computing"} + ], + "max_tokens": 200 + }' +``` + +### With Custom Parameters + +```python +agent = ComputerAgent( + model="cua/anthropic/claude-haiku-4.5", + tools=[computer], + max_trajectory_budget=10.0, + temperature=0.7 +) +``` + +### Using Qwen3 VL 235B + +```python +from agent import ComputerAgent +from computer import Computer + +computer = Computer(os_type="linux", provider_type="docker") + +agent = ComputerAgent( + model="cua/qwen/qwen3-vl-235b", + tools=[computer], + only_n_most_recent_images=3 +) + +messages = [{"role": "user", "content": "Open a browser and search for Python tutorials"}] + +async for result in agent.run(messages): + print(result) +``` + +### Using Claude Opus 4.5 + +```python +from agent import ComputerAgent +from computer import Computer + +computer = Computer( + os_type="linux", + provider_type="cloud", + name="your-container-name", + api_key="your-cua-api-key" +) + +agent = ComputerAgent( + model="cua/anthropic/claude-opus-4.5", + tools=[computer], + instructions="You are a helpful assistant that can control computers", + only_n_most_recent_images=3 +) + +messages = [{"role": "user", "content": "Open a browser and search for Python 
tutorials"}] + +async for result in agent.run(messages): + print(result) +``` + +## Migration from Direct Provider Access + +Switching from direct provider access (BYOK) to CUA VLM Router is simple: + +**Before (Direct Provider Access with BYOK):** + +```python +import os +# Required: Provider-specific API key +os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..." + +agent = ComputerAgent( + model="anthropic/claude-sonnet-4-5-20250929", + tools=[computer] +) +``` + +**After (CUA VLM Router - Cloud Service):** + +```python +import os +# Required: CUA API key only (no provider keys needed) +os.environ["CUA_API_KEY"] = "sk_cua-api01_..." + +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", # Add "cua/" prefix + tools=[computer] +) +``` + +That's it! Same code structure, just different model format. CUA manages all provider infrastructure and credentials for you. + +## Support + +- **Documentation**: [cua.ai/docs](https://cua.ai/docs) +- **Discord**: [Join our community](https://discord.com/invite/mVnXXpdE85) +- **Issues**: [GitHub Issues](https://github.com/trycua/cua/issues) + +## Next Steps + +- Explore [Agent Loops](/agent-sdk/agent-loops) to customize agent behavior +- Learn about [Cost Saving Callbacks](/agent-sdk/callbacks/cost-saving) +- Try [Example Use Cases](/example-usecases/form-filling) +- Review [Supported Model Providers](/agent-sdk/supported-model-providers/) for all options diff --git a/docs/content/docs/agent-sdk/supported-model-providers/index.mdx b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx index 9177e712..270c8fb3 100644 --- a/docs/content/docs/agent-sdk/supported-model-providers/index.mdx +++ b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx @@ -4,23 +4,51 @@ title: Supported Model Providers ## Supported Models -### Anthropic Claude (Computer Use API) +### CUA VLM Router (Recommended) + +Use CUA's cloud inference API for intelligent routing and cost optimization with a single API key. CUA manages all provider infrastructure and credentials for you. + +```python +model="cua/anthropic/claude-sonnet-4.5" # Claude Sonnet 4.5 (recommended) +model="cua/anthropic/claude-haiku-4.5" # Claude Haiku 4.5 (faster) +``` + +**Benefits:** + +- Single API key for multiple providers +- Cost tracking and optimization +- Fully managed infrastructure (no provider keys to manage) + +[Learn more about CUA VLM Router β†’](/agent-sdk/supported-model-providers/cua-vlm-router) + +--- + +### Anthropic Claude (Computer Use API - BYOK) + +Direct access to Anthropic's Claude models using your own Anthropic API key (BYOK - Bring Your Own Key). ```python -model="anthropic/claude-3-5-sonnet-20241022" model="anthropic/claude-3-7-sonnet-20250219" model="anthropic/claude-opus-4-20250514" model="anthropic/claude-sonnet-4-20250514" ``` -### OpenAI Computer Use Preview +**Setup:** Set `ANTHROPIC_API_KEY` environment variable with your Anthropic API key. + +### OpenAI Computer Use Preview (BYOK) + +Direct access to OpenAI's computer use models using your own OpenAI API key (BYOK). ```python model="openai/computer-use-preview" ``` +**Setup:** Set `OPENAI_API_KEY` environment variable with your OpenAI API key. + ### UI-TARS (Local or Huggingface Inference) +Run UI-TARS models locally for privacy and offline use. + ```python model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B" model="ollama_chat/0000/ui-tars-1.5-7b" @@ -28,9 +56,11 @@ model="ollama_chat/0000/ui-tars-1.5-7b" ### Omniparser + Any LLM +Combine Omniparser for UI understanding with any LLM provider. 
+ ```python model="omniparser+ollama_chat/mistral-small3.2" model="omniparser+vertex_ai/gemini-pro" -model="omniparser+anthropic/claude-3-5-sonnet-20241022" +model="omniparser+anthropic/claude-sonnet-4-5-20250929" model="omniparser+openai/gpt-4o" ``` diff --git a/docs/content/docs/telemetry.mdx b/docs/content/docs/agent-sdk/telemetry.mdx similarity index 61% rename from docs/content/docs/telemetry.mdx rename to docs/content/docs/agent-sdk/telemetry.mdx index fb5437c1..4dcb80a8 100644 --- a/docs/content/docs/telemetry.mdx +++ b/docs/content/docs/agent-sdk/telemetry.mdx @@ -1,84 +1,74 @@ --- title: Telemetry -description: This document explains how telemetry works in CUA libraries and how you can control it. -icon: RadioTower +description: How telemetry works in Cua and how to control it --- -# Telemetry in CUA +# Telemetry -CUA tracks anonymized usage and error report statistics; we ascribe to Posthog's approach as detailed [here](https://posthog.com/blog/open-source-telemetry-ethical). If you would like to opt out of sending anonymized info, you can set `telemetry_enabled` to false. +Cua collects anonymized usage and error statistics. We follow [Posthog's ethical telemetry approach](https://posthog.com/blog/open-source-telemetry-ethical). To opt out, set `telemetry_enabled` to false. -## What telemetry data we collect +## What we collect -CUA libraries collect usage data to help improve our software. We have two categories of telemetry: +### Enabled by default (opt-out) -### Opt-Out Telemetry (Enabled by Default) +- System info: OS, OS version, Python version +- Module initialization: When modules are imported and their versions +- Performance: Agent run durations, step counts, token usage, API costs +- Session tracking: Anonymous session IDs and run IDs -Basic performance metrics and system information that help us understand usage patterns: +### Disabled by default (opt-in) -- **System Information**: Operating system, OS version, Python version -- **Module Initialization**: When modules are imported and their versions -- **Performance Metrics**: Agent run durations, step counts, token usage, and API costs -- **Session Tracking**: Anonymous session IDs and run IDs for performance analysis - -### Opt-In Telemetry (Disabled by Default) - -**Conversation Trajectory Logging**: Full conversation history including: +**Trajectory logging** captures full conversation history: - User messages and agent responses -- Computer actions and their outputs -- Reasoning traces from the agent +- Computer actions and outputs +- Agent reasoning traces -**Important**: Trajectory logging is **opt-in only** and must be explicitly enabled. +Must be explicitly enabled. -### We do NOT collect: +### We don't collect - Personal information or user identifiers - API keys or credentials - File contents or application data -- Information about files being accessed -- Actual screenshots or screen contents (unless trajectory logging is enabled) -- Specific text being typed, including user inputs, model outputs, computer outputs, or tool call outputs (unless trajectory logging is enabled) +- Files being accessed +- Screenshots or screen contents (unless trajectory logging is enabled) +- Text being typed, user inputs, model outputs, computer outputs, or tool call outputs (unless trajectory logging is enabled) -## Controlling Telemetry +## How to disable -We are committed to transparency and user control over telemetry. There are two ways to control telemetry: +### Environment variable (global) -### 1. 
Environment Variable (Global Control) - -Telemetry is enabled by default. To disable telemetry, set the `CUA_TELEMETRY_ENABLED` environment variable to a falsy value (`0`, `false`, `no`, or `off`): +Set `CUA_TELEMETRY_ENABLED` to a falsy value (`0`, `false`, `no`, or `off`): ```bash -# Disable telemetry before running your script export CUA_TELEMETRY_ENABLED=false - -# Or as part of the command -CUA_TELEMETRY_ENABLED=1 python your_script.py - ``` -Or from Python: +Or in Python: ```python import os os.environ["CUA_TELEMETRY_ENABLED"] = "false" ``` -### 2. Instance-Level Control + + **Deprecated environment variables:** The environment variables `CUA_TELEMETRY` and + `CUA_TELEMETRY_DISABLED` are deprecated and no longer have any effect. Use `CUA_TELEMETRY_ENABLED` + instead. + -#### Computer SDK +### Per instance + +**Computer SDK:** ```python from computer import Computer -# Enable telemetry (default) -computer = Computer(telemetry_enabled=True) - -# Disable telemetry computer = Computer(telemetry_enabled=False) ``` -#### Agent SDK +**Agent SDK:** ```python from agent import ComputerAgent @@ -86,60 +76,60 @@ import os # Basic telemetry - performance metrics only (opt-out, enabled by default) agent = ComputerAgent( - model="claude-3-5-sonnet-20241022", + model="claude-sonnet-4-5-20250929", telemetry_enabled=True # Default is True ) # Enable telemetry with full conversation trajectory logging (opt-in) agent = ComputerAgent( - model="claude-3-5-sonnet-20241022", + model="claude-sonnet-4-5-20250929", telemetry_enabled={ "log_trajectory": True # Logs full conversation items } ) -# Disable telemetry completely +# Disable completely agent = ComputerAgent( - model="claude-3-5-sonnet-20241022", + model="claude-sonnet-4-5-20250929", telemetry_enabled=False ) -# Disable telemetry completely using environment variables -os.environ["CUA_TELEMETRY_ENABLED"] = "false" +# Enable trajectory logging (opt-in) agent = ComputerAgent( - model="claude-3-5-sonnet-20241022" + model="claude-sonnet-4-5-20250929", + telemetry_enabled={"log_trajectory": True} ) ``` -You can check if telemetry is enabled for an instance: +Check status: ```python -print(computer.telemetry_enabled) # Will print True or False -print(agent.telemetry_enabled) # Will print True, False, or dict +print(computer.telemetry_enabled) # True or False +print(agent.telemetry_enabled) # True, False, or dict ``` -Note that telemetry settings must be configured during initialization and cannot be changed after the object is created. +Telemetry settings are configured at initialization and can't be changed afterward. -## Detailed Telemetry Events +## Events collected -### Computer SDK Events +### Computer SDK | Event Name | Data Collected | Trigger Notes | | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | | **computer_initialized** | β€’ `os`: Operating system (e.g., 'windows', 'darwin', 'linux')
β€’ `os_version`: OS version<br/>β€’ `python_version`: Python version | Triggered when a Computer instance is created | | **module_init** | β€’ `module`: "computer"<br/>β€’ `version`: Package version<br/>β€’ `python_version`: Full Python version string | Triggered once when the computer package is imported for the first time | -### Agent SDK Events +### Agent SDK | Event Name | Data Collected | Trigger Notes | | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | | **module_init** | β€’ `module`: "agent"<br/>β€’ `version`: Package version<br/>β€’ `python_version`: Full Python version string | Triggered once when the agent package is imported for the first time | -| **agent_session_start** | β€’ `session_id`: Unique UUID for this agent instance<br/>β€’ `agent_type`: Class name (e.g., "ComputerAgent")<br/>β€’ `model`: Model name (e.g., "claude-3-5-sonnet")<br/>β€’ `os`: Operating system<br/>β€’ `os_version`: OS version<br/>β€’ `python_version`: Python version | Triggered when TelemetryCallback is initialized (agent instantiation) | +| **agent_session_start** | β€’ `session_id`: Unique UUID for this agent instance<br/>β€’ `agent_type`: Class name (e.g., "ComputerAgent")<br/>β€’ `model`: Model name (e.g., "claude-sonnet-4-5")<br/>β€’ `os`: Operating system<br/>β€’ `os_version`: OS version<br/>β€’ `python_version`: Python version | Triggered when TelemetryCallback is initialized (agent instantiation) | | **agent_run_start** | β€’ `session_id`: Agent session UUID<br/>β€’ `run_id`: Unique UUID for this run<br/>β€’ `start_time`: Unix timestamp<br/>β€’ `input_context_size`: Character count of input messages<br/>β€’ `num_existing_messages`: Count of existing messages<br/>β€’ `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the start of each agent.run() call | | **agent_run_end** | β€’ `session_id`: Agent session UUID<br/>β€’ `run_id`: Run UUID<br/>β€’ `end_time`: Unix timestamp<br/>β€’ `duration_seconds`: Total run duration<br/>β€’ `num_steps`: Total steps taken in this run<br/>β€’ `total_usage`: Accumulated token usage and costs<br/>β€’ `uploaded_trajectory`: Full conversation items (opt-in) | Triggered at the end of each agent.run() call | | **agent_step** | β€’ `session_id`: Agent session UUID<br/>β€’ `run_id`: Run UUID<br/>β€’ `step`: Step number (incremental)<br/>β€’ `timestamp`: Unix timestamp<br/>β€’ `duration_seconds`: Duration of previous step | Triggered on each agent response/step during a run | | **agent_usage** | β€’ `session_id`: Agent session UUID<br/>β€’ `run_id`: Run UUID<br/>β€’ `step`: Current step number<br/>β€’ `prompt_tokens`: Tokens in prompt<br/>β€’ `completion_tokens`: Tokens in response<br/>β€’ `total_tokens`: Total tokens used
β€’ `response_cost`: Cost of this API call | Triggered whenever usage information is received from LLM API | -## Transparency +## Questions -We believe in being transparent about the data we collect. If you have any questions about our telemetry practices, please open an issue on our GitHub repository. +Questions about telemetry? Open an issue on our [GitHub repository](https://github.com/trycua/cua). diff --git a/docs/content/docs/computer-sdk/cloud-vm-management.mdx b/docs/content/docs/computer-sdk/cloud-vm-management.mdx index 89af7639..3db79e62 100644 --- a/docs/content/docs/computer-sdk/cloud-vm-management.mdx +++ b/docs/content/docs/computer-sdk/cloud-vm-management.mdx @@ -1,32 +1,32 @@ --- -title: Cloud VM Management -description: Manage your Cua Cloud sandboxes (VMs) via Python SDK or HTTP API +title: Cloud Sandbox Management +description: Manage your Cua Cloud sandboxes via Python SDK or HTTP API --- import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; -Using the Cua Cloud API, you can manage your Cua Cloud sandboxes (VMs) with Python or HTTP (curl). +Using the Cua Cloud API, you can manage your Cua Cloud sandboxes with Python or HTTP (curl). All examples require a CUA API key. You can obtain one from the [Dashboard](https://www.cua.ai/dashboard/keys). --- -## List VMs +## List Sandboxes ```python -import os import asyncio from computer.providers.cloud.provider import CloudProvider async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # CloudProvider automatically reads CUA_API_KEY from environment + # You can also pass api_key explicitly: CloudProvider(api_key="your-api-key") # Optional: point to a different API base # os.environ["CUA_API_BASE"] = "https://api.cua.ai" - provider = CloudProvider(api_key=api_key, verbose=False) + provider = CloudProvider(verbose=False) async with provider: vms = await provider.list_vms() for vm in vms: @@ -51,7 +51,7 @@ curl -H "Authorization: Bearer $CUA_API_KEY" \ Responses: -- 200: Array of minimal VM objects with fields `{ name, password, status }` +- 200: Array of minimal sandbox objects with fields `{ name, password, status }` - 401: Unauthorized (missing/invalid API key) ```json @@ -66,11 +66,11 @@ Responses: Status values: -- `pending`: VM deployment in progress -- `running`: VM is active and accessible -- `stopped`: VM is stopped but not terminated -- `terminated`: VM has been permanently destroyed -- `failed`: VM deployment or operation failed +- `pending`: Sandbox deployment in progress +- `running`: Sandbox is active and accessible +- `stopped`: Sandbox is stopped but not terminated +- `terminated`: Sandbox has been permanently destroyed +- `failed`: Sandbox deployment or operation failed --- @@ -80,23 +80,22 @@ Status values: --- -## Start a VM +## Start a Sandbox -Provide the VM name you want to start. +Provide the sandbox name you want to start. 
```python -import os import asyncio from computer.providers.cloud.provider import CloudProvider async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # CloudProvider automatically reads CUA_API_KEY from environment name = "my-vm-name" # e.g., "m-linux-96lcxd2c2k" - provider = CloudProvider(api_key=api_key) + provider = CloudProvider() async with provider: resp = await provider.run_vm(name) print(resp) # { "name": name, "status": "starting" } @@ -118,7 +117,7 @@ Responses: - 204: No Content (start accepted) - 401: Unauthorized (missing/invalid API key) -- 404: VM not found or not owned by the user +- 404: Sandbox not found or not owned by the user ```text HTTP/1.1 204 No Content @@ -129,23 +128,22 @@ HTTP/1.1 204 No Content --- -## Stop a VM +## Stop a Sandbox -Stops the VM asynchronously. +Stops the sandbox asynchronously. ```python -import os import asyncio from computer.providers.cloud.provider import CloudProvider async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # CloudProvider automatically reads CUA_API_KEY from environment name = "my-vm-name" - provider = CloudProvider(api_key=api_key) + provider = CloudProvider() async with provider: resp = await provider.stop_vm(name) print(resp) # { "name": name, "status": "stopping" } @@ -167,7 +165,7 @@ Responses: - 202: Accepted with `{ "status": "stopping" }` - 401: Unauthorized (missing/invalid API key) -- 404: VM not found or not owned by the user +- 404: Sandbox not found or not owned by the user ```json { "status": "stopping" } @@ -178,23 +176,22 @@ Responses: --- -## Restart a VM +## Restart a Sandbox -Restarts the VM asynchronously. +Restarts the sandbox asynchronously. ```python -import os import asyncio from computer.providers.cloud.provider import CloudProvider async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # CloudProvider automatically reads CUA_API_KEY from environment name = "my-vm-name" - provider = CloudProvider(api_key=api_key) + provider = CloudProvider() async with provider: resp = await provider.restart_vm(name) print(resp) # { "name": name, "status": "restarting" } @@ -216,7 +213,7 @@ Responses: - 202: Accepted with `{ "status": "restarting" }` - 401: Unauthorized (missing/invalid API key) -- 404: VM not found or not owned by the user +- 404: Sandbox not found or not owned by the user ```json { "status": "restarting" } @@ -227,23 +224,22 @@ Responses: --- -## Query a VM by name +## Query a Sandbox by name -Query the computer-server running on the VM. Useful for checking details like status or OS type. +Query the computer-server running on the sandbox. Useful for checking details like status or OS type. 
```python -import os import asyncio from computer.providers.cloud.provider import CloudProvider async def main(): - api_key = os.getenv("CUA_API_KEY") or "your-api-key" + # CloudProvider automatically reads CUA_API_KEY from environment name = "my-vm-name" - provider = CloudProvider(api_key=api_key) + provider = CloudProvider() async with provider: info = await provider.get_vm(name) print(info) diff --git a/docs/content/docs/computer-sdk/commands.mdx b/docs/content/docs/computer-sdk/commands.mdx index c7b5a39b..30818e2e 100644 --- a/docs/content/docs/computer-sdk/commands.mdx +++ b/docs/content/docs/computer-sdk/commands.mdx @@ -18,7 +18,7 @@ Execute shell commands and get detailed results: # Run shell command result = await computer.interface.run_command(cmd) # result.stdout, result.stderr, result.returncode ``` - + @@ -230,7 +230,7 @@ Control desktop environment features like wallpaper: env = await computer.interface.get_desktop_environment() print(env) # "xfce4" - # Set desktop wallpaper to an image file accessible on the VM + # Set desktop wallpaper to an image file accessible on the sandbox await computer.interface.set_wallpaper("/home/cua/shared/wallpaper.png") ``` @@ -241,7 +241,7 @@ Control desktop environment features like wallpaper: const env = await computer.interface.getDesktopEnvironment(); print(env) # "xfce4" - // Set desktop wallpaper to an image file accessible on the VM + // Set desktop wallpaper to an image file accessible on the sandbox await computer.interface.setWallpaper('/home/cua/shared/wallpaper.png'); ``` diff --git a/docs/content/docs/computer-sdk/computer-ui.mdx b/docs/content/docs/computer-sdk/computer-ui.mdx index c731e4c4..a51ef60d 100644 --- a/docs/content/docs/computer-sdk/computer-ui.mdx +++ b/docs/content/docs/computer-sdk/computer-ui.mdx @@ -1,7 +1,12 @@ --- -title: Computer UI +title: Computer UI (Deprecated) --- + + The Computer UI is deprecated and will be replaced with a revamped playground experience soon. We + recommend using VNC or Screen Sharing for precise control of the computer instead. + + The computer module includes a Gradio UI for creating and sharing demonstration data. We make it easy for people to build community datasets for better computer use models with an upload to Huggingface feature. ```bash diff --git a/docs/content/docs/computer-sdk/computers.mdx b/docs/content/docs/computer-sdk/computers.mdx index 238c12e0..2ae18315 100644 --- a/docs/content/docs/computer-sdk/computers.mdx +++ b/docs/content/docs/computer-sdk/computers.mdx @@ -1,29 +1,20 @@ --- -title: Cua Computers +title: Computer Types description: Understanding Cua computer types and connection methods --- - - A corresponding{' '} - - Jupyter Notebook - {' '} - and{' '} - - NodeJS project - {' '} - are available for this documentation. - +{/* prettier-ignore */} +A corresponding Jupyter Notebook and NodeJS project are available for this documentation. Before we can automate apps using AI, we need to first connect to a Computer Server to give the AI a safe environment to execute workflows in. -Cua Computers are preconfigured virtual machines running the Computer Server. They can be either macOS, Linux, or Windows. They're found in either a cloud-native container, or on your host desktop. +Cua Computers are preconfigured sandboxes running the Computer Server. They can be either macOS, Linux, or Windows. They're found in either a cloud-native sandbox, or on your host desktop. 
## Cloud Sandbox **Easiest & safest way to get started - works on any host OS** -This is a Cloud Sandbox running the Computer Server. Get a container at [cua.ai](https://cua.ai/). +This is a Cloud Sandbox running the Computer Server. Get a sandbox at [cua.ai](https://cua.ai/). @@ -85,7 +76,7 @@ Cua provides two Docker images for running Linux desktops: os_type="linux", provider_type="docker", image="trycua/cua-xfce:latest", - name="my-xfce-container" + name="my-xfce-sandbox" ) await computer.run() # Launch & connect to Docker sandbox @@ -118,7 +109,7 @@ Cua provides two Docker images for running Linux desktops: os_type="linux", provider_type="docker", image="trycua/cua-ubuntu:latest", - name="my-kasm-container" + name="my-kasm-sandbox" ) await computer.run() # Launch & connect to Docker sandbox @@ -152,7 +143,7 @@ computer = Computer( await computer.run() # Launch & connect to Windows Sandbox ``` -## macOS VM +## macOS Sandbox **macOS hosts only - requires Lume CLI** @@ -162,7 +153,7 @@ await computer.run() # Launch & connect to Windows Sandbox /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" ``` -2. Start a local Cua macOS VM +2. Start a local Cua macOS sandbox ```bash lume run macos-sequoia-cua:latest diff --git a/docs/content/docs/agent-sdk/custom-computer-handlers.mdx b/docs/content/docs/computer-sdk/custom-computer-handlers.mdx similarity index 97% rename from docs/content/docs/agent-sdk/custom-computer-handlers.mdx rename to docs/content/docs/computer-sdk/custom-computer-handlers.mdx index c76a5d66..d1668883 100644 --- a/docs/content/docs/agent-sdk/custom-computer-handlers.mdx +++ b/docs/content/docs/computer-sdk/custom-computer-handlers.mdx @@ -34,7 +34,7 @@ You can then use this as a tool for your agent: from agent import ComputerAgent agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[custom_computer], ) @@ -122,7 +122,7 @@ class MyCustomComputer(AsyncComputerHandler): custom_computer = MyCustomComputer() agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[custom_computer], ) diff --git a/docs/content/docs/computer-sdk/meta.json b/docs/content/docs/computer-sdk/meta.json index 547dde17..e3b9c7e6 100644 --- a/docs/content/docs/computer-sdk/meta.json +++ b/docs/content/docs/computer-sdk/meta.json @@ -1,5 +1,12 @@ { "title": "Computer SDK", "description": "Build computer-using agents with the Computer SDK", - "pages": ["computers", "commands", "computer-ui", "tracing-api", "sandboxed-python"] + "pages": [ + "computers", + "commands", + "tracing-api", + "sandboxed-python", + "custom-computer-handlers", + "computer-ui" + ] } diff --git a/docs/content/docs/computer-sdk/sandboxed-python.mdx b/docs/content/docs/computer-sdk/sandboxed-python.mdx index 82d2809b..bb1c1e9c 100644 --- a/docs/content/docs/computer-sdk/sandboxed-python.mdx +++ b/docs/content/docs/computer-sdk/sandboxed-python.mdx @@ -33,7 +33,7 @@ def read_file(location: str) -> str: return f.read() async def main(): - async with Computer(os_type="linux", provider_type="cloud", name="my-container", api_key="...") as computer: + async with Computer(os_type="linux", provider_type="cloud", name="my-sandbox", api_key="...") as computer: # Call the sandboxed function (runs remotely) result = await read_file("/etc/hostname") print(result) @@ -60,7 +60,7 @@ await my_computer.venv_install("myenv", ["requests"]) You can use sandboxed functions 
to interact with macOS applications on a local Cua Computer (requires `os_type="darwin"`). This is particularly useful for automation tasks that involve GUI applications. ```python -# Example: Use sandboxed functions to execute code in a Cua Container +# Example: Use sandboxed functions to execute code in a Cua Sandbox from computer.helpers import sandboxed await computer.venv_install("demo_venv", ["macos-pyxa"]) # Install packages in a virtual environment @@ -71,10 +71,10 @@ def greet_and_print(name): import PyXA safari = PyXA.Application("Safari") html = safari.current_document.source() - print(f"Hello from inside the container, {name}!") + print(f"Hello from inside the sandbox, {name}!") return {"greeted": name, "safari_html": html} -# When a @sandboxed function is called, it will execute in the container +# When a @sandboxed function is called, it will execute in the sandbox result = await greet_and_print("Cua") # Result: {"greeted": "Cua", "safari_html": "..."} # stdout and stderr are also captured and printed / raised diff --git a/docs/content/docs/computer-sdk/tracing-api.mdx b/docs/content/docs/computer-sdk/tracing-api.mdx index 06c889f3..79b4b0a5 100644 --- a/docs/content/docs/computer-sdk/tracing-api.mdx +++ b/docs/content/docs/computer-sdk/tracing-api.mdx @@ -7,11 +7,6 @@ description: Record computer interactions for debugging, training, and analysis The Computer tracing API provides a powerful way to record computer interactions for debugging, training, analysis, and compliance purposes. Inspired by Playwright's tracing functionality, it offers flexible recording options and standardized output formats. - - The tracing API addresses GitHub issue #299 by providing a unified recording interface that works - with any Computer usage pattern, not just ComputerAgent. - - ## Overview The tracing API allows you to: diff --git a/docs/content/docs/example-usecases/form-filling.mdx b/docs/content/docs/example-usecases/form-filling.mdx index d9a61581..fd365a0f 100644 --- a/docs/content/docs/example-usecases/form-filling.mdx +++ b/docs/content/docs/example-usecases/form-filling.mdx @@ -1,9 +1,9 @@ --- -title: Form Filling +title: PDF to Form Automation description: Enhance and Automate Interactions Between Form Filling and Local File Systems --- -import { EditableCodeBlock, EditableValue, S } from '@/components/editable-code-block'; +import { Step, Steps } from 'fumadocs-ui/components/steps'; import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; ## Overview @@ -12,9 +12,17 @@ Cua can be used to automate interactions between form filling and local file sys This preset usecase uses [Cua Computer](/computer-sdk/computers) to interact with a web page and local file systems along with [Agent Loops](/agent-sdk/agent-loops) to run the agent in a loop with message history. -## Quickstart +--- -Create a `requirements.txt` file with the following dependencies: + + + + +### Set Up Your Environment + +First, install the required dependencies: + +Create a `requirements.txt` file: ```text cua-agent @@ -22,33 +30,32 @@ cua-computer python-dotenv>=1.0.0 ``` -And install: +Install the dependencies: ```bash pip install -r requirements.txt ``` -Create a `.env` file with the following environment variables: +Create a `.env` file with your API keys: ```text -ANTHROPIC_API_KEY=your-api-key +ANTHROPIC_API_KEY=your-anthropic-api-key CUA_API_KEY=sk_cua-api01... 
``` -Select the environment you want to run the code in (_click on the underlined values in the code to edit them directly!_): + - - + - -{`import asyncio +### Create Your Form Filling Script + +Create a Python file (e.g., `form_filling.py`) and select your environment: + + + + +```python +import asyncio import logging import os import signal @@ -59,24 +66,24 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(**name**) +logger = logging.getLogger(__name__) def handle_sigint(sig, frame): -print("\\n\\nExecution interrupted by user. Exiting gracefully...") -exit(0) + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) async def fill_application(): -try: -async with Computer( -os_type="linux", -provider_type=VMProviderType.CLOUD, -name="`}{`", -api_key="`}{`", -verbosity=logging.INFO, -) as computer: + try: + async with Computer( + os_type="linux", + provider_type=VMProviderType.CLOUD, + name="your-sandbox-name", # Replace with your sandbox name + api_key=os.environ["CUA_API_KEY"], + verbosity=logging.INFO, + ) as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], only_n_most_recent_images=3, verbosity=logging.INFO, @@ -93,7 +100,7 @@ verbosity=logging.INFO, history = [] for i, task in enumerate(tasks, 1): - print(f"\\n[Task {i}/{len(tasks)}] {task}") + print(f"\n[Task {i}/{len(tasks)}] {task}") # Add user message to history history.append({"role": "user", "content": task}) @@ -116,7 +123,7 @@ verbosity=logging.INFO, print(f"βœ… Task {i}/{len(tasks)} completed") - print("\\nπŸŽ‰ All tasks completed successfully!") + print("\nπŸŽ‰ All tasks completed successfully!") except Exception as e: logger.error(f"Error in fill_application: {e}") @@ -124,18 +131,18 @@ verbosity=logging.INFO, raise def main(): -try: -load_dotenv() + try: + load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( - "Please set the ANTHROPIC_API_KEY environment variable.\\n" + "Please set the ANTHROPIC_API_KEY environment variable.\n" "You can add it to a .env file in the project root." ) if "CUA_API_KEY" not in os.environ: raise RuntimeError( - "Please set the CUA_API_KEY environment variable.\\n" + "Please set the CUA_API_KEY environment variable.\n" "You can add it to a .env file in the project root." ) @@ -147,22 +154,15 @@ load_dotenv() logger.error(f"Error running automation: {e}") traceback.print_exc() -if **name** == "**main**": -main()`} - - +if __name__ == "__main__": + main() +``` - + - -{`import asyncio +```python +import asyncio import logging import os import signal @@ -173,23 +173,23 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(**name**) +logger = logging.getLogger(__name__) def handle_sigint(sig, frame): -print("\\n\\nExecution interrupted by user. Exiting gracefully...") -exit(0) + print("\n\nExecution interrupted by user. 
Exiting gracefully...") + exit(0) async def fill_application(): -try: -async with Computer( -os_type="macos", -provider_type=VMProviderType.LUME, -name="`}{`", -verbosity=logging.INFO, -) as computer: + try: + async with Computer( + os_type="linux", + provider_type=VMProviderType.DOCKER, + image="trycua/cua-xfce:latest", # or "trycua/cua-ubuntu:latest" + verbosity=logging.INFO, + ) as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], only_n_most_recent_images=3, verbosity=logging.INFO, @@ -206,7 +206,7 @@ verbosity=logging.INFO, history = [] for i, task in enumerate(tasks, 1): - print(f"\\n[Task {i}/{len(tasks)}] {task}") + print(f"\n[Task {i}/{len(tasks)}] {task}") # Add user message to history history.append({"role": "user", "content": task}) @@ -229,7 +229,7 @@ verbosity=logging.INFO, print(f"βœ… Task {i}/{len(tasks)} completed") - print("\\nπŸŽ‰ All tasks completed successfully!") + print("\nπŸŽ‰ All tasks completed successfully!") except Exception as e: logger.error(f"Error in fill_application: {e}") @@ -237,12 +237,12 @@ verbosity=logging.INFO, raise def main(): -try: -load_dotenv() + try: + load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( - "Please set the ANTHROPIC_API_KEY environment variable.\\n" + "Please set the ANTHROPIC_API_KEY environment variable.\n" "You can add it to a .env file in the project root." ) @@ -254,20 +254,15 @@ load_dotenv() logger.error(f"Error running automation: {e}") traceback.print_exc() -if **name** == "**main**": -main()`} - - +if __name__ == "__main__": + main() +``` - + - -{`import asyncio +```python +import asyncio import logging import os import signal @@ -278,22 +273,23 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(**name**) +logger = logging.getLogger(__name__) def handle_sigint(sig, frame): -print("\\n\\nExecution interrupted by user. Exiting gracefully...") -exit(0) + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) async def fill_application(): -try: -async with Computer( -os_type="windows", -provider_type=VMProviderType.WINDOWS_SANDBOX, -verbosity=logging.INFO, -) as computer: + try: + async with Computer( + os_type="macos", + provider_type=VMProviderType.LUME, + name="macos-sequoia-cua:latest", + verbosity=logging.INFO, + ) as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], only_n_most_recent_images=3, verbosity=logging.INFO, @@ -310,7 +306,7 @@ verbosity=logging.INFO, history = [] for i, task in enumerate(tasks, 1): - print(f"\\n[Task {i}/{len(tasks)}] {task}") + print(f"\n[Task {i}/{len(tasks)}] {task}") # Add user message to history history.append({"role": "user", "content": task}) @@ -333,7 +329,7 @@ verbosity=logging.INFO, print(f"βœ… Task {i}/{len(tasks)} completed") - print("\\nπŸŽ‰ All tasks completed successfully!") + print("\nπŸŽ‰ All tasks completed successfully!") except Exception as e: logger.error(f"Error in fill_application: {e}") @@ -341,12 +337,12 @@ verbosity=logging.INFO, raise def main(): -try: -load_dotenv() + try: + load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( - "Please set the ANTHROPIC_API_KEY environment variable.\\n" + "Please set the ANTHROPIC_API_KEY environment variable.\n" "You can add it to a .env file in the project root." 
) @@ -358,22 +354,15 @@ load_dotenv() logger.error(f"Error running automation: {e}") traceback.print_exc() -if **name** == "**main**": -main()`} - - +if __name__ == "__main__": + main() +``` - + - -{`import asyncio +```python +import asyncio import logging import os import signal @@ -384,23 +373,22 @@ from computer import Computer, VMProviderType from dotenv import load_dotenv logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(**name**) +logger = logging.getLogger(__name__) def handle_sigint(sig, frame): -print("\\n\\nExecution interrupted by user. Exiting gracefully...") -exit(0) + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) async def fill_application(): -try: -async with Computer( -os_type="linux", -provider_type=VMProviderType.DOCKER, -name="`}{`", -verbosity=logging.INFO, -) as computer: + try: + async with Computer( + os_type="windows", + provider_type=VMProviderType.WINDOWS_SANDBOX, + verbosity=logging.INFO, + ) as computer: agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="cua/anthropic/claude-sonnet-4.5", tools=[computer], only_n_most_recent_images=3, verbosity=logging.INFO, @@ -417,7 +405,7 @@ verbosity=logging.INFO, history = [] for i, task in enumerate(tasks, 1): - print(f"\\n[Task {i}/{len(tasks)}] {task}") + print(f"\n[Task {i}/{len(tasks)}] {task}") # Add user message to history history.append({"role": "user", "content": task}) @@ -440,7 +428,7 @@ verbosity=logging.INFO, print(f"βœ… Task {i}/{len(tasks)} completed") - print("\\nπŸŽ‰ All tasks completed successfully!") + print("\nπŸŽ‰ All tasks completed successfully!") except Exception as e: logger.error(f"Error in fill_application: {e}") @@ -448,12 +436,12 @@ verbosity=logging.INFO, raise def main(): -try: -load_dotenv() + try: + load_dotenv() if "ANTHROPIC_API_KEY" not in os.environ: raise RuntimeError( - "Please set the ANTHROPIC_API_KEY environment variable.\\n" + "Please set the ANTHROPIC_API_KEY environment variable.\n" "You can add it to a .env file in the project root." ) @@ -465,16 +453,42 @@ load_dotenv() logger.error(f"Error running automation: {e}") traceback.print_exc() -if **name** == "**main**": -main()`} - - +if __name__ == "__main__": + main() +``` + + + + +### Run Your Script + +Execute your form filling automation: + +```bash +python form_filling.py +``` + +The agent will: + +1. Download the PDF resume from Overleaf +2. Extract information from the PDF +3. Fill out the JotForm with the extracted information + +Monitor the output to see the agent's progress through each task. 
+ + + + + +--- + ## Next Steps - Learn more about [Cua computers](/computer-sdk/computers) and [computer commands](/computer-sdk/commands) - Read about [Agent loops](/agent-sdk/agent-loops), [tools](/agent-sdk/custom-tools), and [supported model providers](/agent-sdk/supported-model-providers/) - Experiment with different [Models and Providers](/agent-sdk/supported-model-providers/) +- Join our [Discord community](https://discord.com/invite/mVnXXpdE85) for help diff --git a/docs/content/docs/example-usecases/gemini-complex-ui-navigation.mdx b/docs/content/docs/example-usecases/gemini-complex-ui-navigation.mdx new file mode 100644 index 00000000..4a3d1d0f --- /dev/null +++ b/docs/content/docs/example-usecases/gemini-complex-ui-navigation.mdx @@ -0,0 +1,640 @@ +--- +title: GUI Grounding with Gemini 3 +description: Using Google's Gemini 3 with OmniParser for Advanced GUI Grounding Tasks +--- + +import { Step, Steps } from 'fumadocs-ui/components/steps'; +import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; +import { Callout } from 'fumadocs-ui/components/callout'; + +## Overview + +This example demonstrates how to use Google's Gemini 3 models with OmniParser for complex GUI grounding tasks. Gemini 3 Pro achieves exceptional performance on the [ScreenSpot-Pro benchmark](https://github.com/likaixin2000/ScreenSpot-Pro-GUI-Grounding) with a **72.7% accuracy** (compared to Claude Sonnet 4.5's 36.2%), making it ideal for precise UI element location and complex navigation tasks. + +Demo of Gemini 3 with OmniParser performing complex GUI navigation tasks + + + According to [Google's Gemini 3 announcement](https://blog.google/products/gemini/gemini-3/), + Gemini 3 Pro achieves: - **72.7%** on ScreenSpot-Pro (vs. Gemini 2.5 Pro's 11.4%) - + Industry-leading performance on complex UI navigation tasks - Advanced multimodal understanding + for high-resolution screens + + +### What You'll Build + +This guide shows how to: + +- Set up Vertex AI with proper authentication +- Use OmniParser with Gemini 3 for GUI element detection +- Leverage Gemini 3-specific features like `thinking_level` and `media_resolution` +- Create agents that can perform complex multi-step UI interactions + +--- + + + + + +### Set Up Google Cloud and Vertex AI + +Before using Gemini 3 models, you need to enable Vertex AI in Google Cloud Console. + +#### 1. Create a Google Cloud Project + +1. Go to [Google Cloud Console](https://console.cloud.google.com/) +2. Click **Select a project** β†’ **New Project** +3. Enter a project name and click **Create** +4. Note your **Project ID** (you'll need this later) + +#### 2. Enable Vertex AI API + +1. Navigate to [Vertex AI API](https://console.cloud.google.com/apis/library/aiplatform.googleapis.com) +2. Select your project +3. Click **Enable** + +#### 3. Enable Billing + +1. Go to [Billing](https://console.cloud.google.com/billing) +2. Link a billing account to your project +3. Vertex AI offers a [free tier](https://cloud.google.com/vertex-ai/pricing) for testing + +#### 4. Create a Service Account + +1. Go to [IAM & Admin > Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) +2. Click **Create Service Account** +3. Enter a name (e.g., "cua-gemini-agent") +4. Click **Create and Continue** +5. Grant the **Vertex AI User** role +6. Click **Done** + +#### 5. Create and Download Service Account Key + +1. Click on your newly created service account +2. Go to **Keys** tab +3. Click **Add Key** β†’ **Create new key** +4. Select **JSON** format +5. 
Click **Create** (the key file will download automatically) +6. **Important**: Store this key file securely! It contains credentials for accessing your Google Cloud resources + + + Never commit your service account JSON key to version control! Add it to `.gitignore` immediately. + + + + + + +### Install Dependencies + +Install the required packages for OmniParser and Gemini 3: + +Create a `requirements.txt` file: + +```text +cua-agent +cua-computer +cua-som # OmniParser for GUI element detection +litellm>=1.0.0 +python-dotenv>=1.0.0 +google-cloud-aiplatform>=1.70.0 +``` + +Install the dependencies: + +```bash +pip install -r requirements.txt +``` + + + + + +### Configure Environment Variables + +Create a `.env` file in your project root: + +```text +# Google Cloud / Vertex AI credentials +GOOGLE_CLOUD_PROJECT=your-project-id +GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-service-account-key.json + +# Cua credentials (for cloud sandboxes) +CUA_API_KEY=sk_cua-api01... +CUA_SANDBOX_NAME=your-sandbox-name +``` + +Replace the values: + +- `your-project-id`: Your Google Cloud Project ID from Step 1 +- `/path/to/your-service-account-key.json`: Path to the JSON key file you downloaded +- `sk_cua-api01...`: Your Cua API key from the [Cua dashboard](https://cua.dev) +- `your-sandbox-name`: Your sandbox name (if using cloud sandboxes) + + + + + +### Create Your Complex UI Navigation Script + +Create a Python file (e.g., `gemini_ui_navigation.py`): + + + + +```python +import asyncio +import logging +import os +import signal +import traceback + +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def handle_sigint(sig, frame): + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) + +async def complex_ui_navigation(): + """ + Demonstrate Gemini 3's exceptional UI grounding capabilities + with complex, multi-step navigation tasks. + """ + try: + async with Computer( + os_type="linux", + provider_type=VMProviderType.CLOUD, + name=os.environ["CUA_SANDBOX_NAME"], + api_key=os.environ["CUA_API_KEY"], + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + # Use OmniParser with Gemini 3 Pro for optimal GUI grounding + model="omniparser+vertex_ai/gemini-3-pro-preview", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=False, + max_trajectory_budget=5.0, + # Gemini 3-specific parameters + thinking_level="high", # Enables deeper reasoning (vs "low") + media_resolution="high", # High-resolution image processing (vs "low" or "medium") + ) + + # Complex GUI grounding tasks inspired by ScreenSpot-Pro benchmark + # These test precise element location in professional UIs + tasks = [ + # Task 1: GitHub repository navigation + { + "instruction": ( + "Go to github.com/trycua/cua. " + "Find and click on the 'Issues' tab. " + "Then locate and click on the search box within the issues page " + "(not the global GitHub search). " + "Type 'omniparser' and press Enter." + ), + "description": "Tests precise UI element distinction in a complex interface", + }, + + # Task 2: Search for and install Visual Studio Code + { + "instruction": ( + "Open your system's app store (e.g., Microsoft Store). " + "Search for 'Visual Studio Code'. " + "In the search results, select 'Visual Studio Code'. " + "Click on 'Install' or 'Get' to begin the installation. 
" + "If prompted, accept any permissions or confirm the installation. " + "Wait for Visual Studio Code to finish installing." + ), + "description": "Tests the ability to search for an application and complete its installation through a step-by-step app store workflow.", + }, + ] + + history = [] + + for i, task_info in enumerate(tasks, 1): + task = task_info["instruction"] + print(f"\n{'='*60}") + print(f"[Task {i}/{len(tasks)}] {task_info['description']}") + print(f"{'='*60}") + print(f"\nInstruction: {task}\n") + + # Add user message to history + history.append({"role": "user", "content": task}) + + # Run agent with conversation history + async for result in agent.run(history, stream=False): + history += result.get("output", []) + + # Print output for debugging + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for content_part in content: + if content_part.get("text"): + logger.info(f"Agent: {content_part.get('text')}") + elif item.get("type") == "computer_call": + action = item.get("action", {}) + action_type = action.get("type", "") + logger.debug(f"Computer Action: {action_type}") + + print(f"\nβœ… Task {i}/{len(tasks)} completed") + + print("\nπŸŽ‰ All complex UI navigation tasks completed successfully!") + + except Exception as e: + logger.error(f"Error in complex_ui_navigation: {e}") + traceback.print_exc() + raise + +def main(): + try: + load_dotenv() + + # Validate required environment variables + required_vars = [ + "GOOGLE_CLOUD_PROJECT", + "GOOGLE_APPLICATION_CREDENTIALS", + "CUA_API_KEY", + "CUA_SANDBOX_NAME", + ] + + missing_vars = [var for var in required_vars if not os.environ.get(var)] + if missing_vars: + raise RuntimeError( + f"Missing required environment variables: {', '.join(missing_vars)}\n" + f"Please check your .env file and ensure all keys are set.\n" + f"See the setup guide for details on configuring Vertex AI credentials." + ) + + signal.signal(signal.SIGINT, handle_sigint) + + asyncio.run(complex_ui_navigation()) + + except Exception as e: + logger.error(f"Error running automation: {e}") + traceback.print_exc() + +if __name__ == "__main__": + main() +``` + + + + +```python +import asyncio +import logging +import os +import signal +import traceback + +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def handle_sigint(sig, frame): + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) + +async def complex_ui_navigation(): + """ + Demonstrate Gemini 3's exceptional UI grounding capabilities + with complex, multi-step navigation tasks. + """ + try: + async with Computer( + os_type="linux", + provider_type=VMProviderType.DOCKER, + image="trycua/cua-xfce:latest", + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + # Use OmniParser with Gemini 3 Pro for optimal GUI grounding + model="omniparser+vertex_ai/gemini-3-pro-preview", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=False, + max_trajectory_budget=5.0, + # Gemini 3-specific parameters + thinking_level="high", # Enables deeper reasoning (vs "low") + media_resolution="high", # High-resolution image processing (vs "low" or "medium") + ) + + # Complex GUI grounding tasks inspired by ScreenSpot-Pro benchmark + tasks = [ + { + "instruction": ( + "Go to github.com/trycua/cua. 
" + "Find and click on the 'Issues' tab. " + "Then locate and click on the search box within the issues page " + "(not the global GitHub search). " + "Type 'omniparser' and press Enter." + ), + "description": "Tests precise UI element distinction in a complex interface", + }, + ] + + history = [] + + for i, task_info in enumerate(tasks, 1): + task = task_info["instruction"] + print(f"\n{'='*60}") + print(f"[Task {i}/{len(tasks)}] {task_info['description']}") + print(f"{'='*60}") + print(f"\nInstruction: {task}\n") + + history.append({"role": "user", "content": task}) + + async for result in agent.run(history, stream=False): + history += result.get("output", []) + + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for content_part in content: + if content_part.get("text"): + logger.info(f"Agent: {content_part.get('text')}") + elif item.get("type") == "computer_call": + action = item.get("action", {}) + action_type = action.get("type", "") + logger.debug(f"Computer Action: {action_type}") + + print(f"\nβœ… Task {i}/{len(tasks)} completed") + + print("\nπŸŽ‰ All complex UI navigation tasks completed successfully!") + + except Exception as e: + logger.error(f"Error in complex_ui_navigation: {e}") + traceback.print_exc() + raise + +def main(): + try: + load_dotenv() + + required_vars = [ + "GOOGLE_CLOUD_PROJECT", + "GOOGLE_APPLICATION_CREDENTIALS", + ] + + missing_vars = [var for var in required_vars if not os.environ.get(var)] + if missing_vars: + raise RuntimeError( + f"Missing required environment variables: {', '.join(missing_vars)}\n" + f"Please check your .env file." + ) + + signal.signal(signal.SIGINT, handle_sigint) + + asyncio.run(complex_ui_navigation()) + + except Exception as e: + logger.error(f"Error running automation: {e}") + traceback.print_exc() + +if __name__ == "__main__": + main() +``` + + + + +```python +import asyncio +import logging +import os +import signal +import traceback + +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def handle_sigint(sig, frame): + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) + +async def complex_ui_navigation(): + """ + Demonstrate Gemini 3's exceptional UI grounding capabilities + with complex, multi-step navigation tasks. + """ + try: + async with Computer( + os_type="macos", + provider_type=VMProviderType.LUME, + name="macos-sequoia-cua:latest", + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + # Use OmniParser with Gemini 3 Pro for optimal GUI grounding + model="omniparser+vertex_ai/gemini-3-pro-preview", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=False, + max_trajectory_budget=5.0, + # Gemini 3-specific parameters + thinking_level="high", # Enables deeper reasoning (vs "low") + media_resolution="high", # High-resolution image processing (vs "low" or "medium") + ) + + # Complex GUI grounding tasks inspired by ScreenSpot-Pro benchmark + tasks = [ + { + "instruction": ( + "Go to github.com/trycua/cua. " + "Find and click on the 'Issues' tab. " + "Then locate and click on the search box within the issues page " + "(not the global GitHub search). " + "Type 'omniparser' and press Enter." 
+ ), + "description": "Tests precise UI element distinction in a complex interface", + }, + ] + + history = [] + + for i, task_info in enumerate(tasks, 1): + task = task_info["instruction"] + print(f"\n{'='*60}") + print(f"[Task {i}/{len(tasks)}] {task_info['description']}") + print(f"{'='*60}") + print(f"\nInstruction: {task}\n") + + history.append({"role": "user", "content": task}) + + async for result in agent.run(history, stream=False): + history += result.get("output", []) + + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for content_part in content: + if content_part.get("text"): + logger.info(f"Agent: {content_part.get('text')}") + elif item.get("type") == "computer_call": + action = item.get("action", {}) + action_type = action.get("type", "") + logger.debug(f"Computer Action: {action_type}") + + print(f"\nβœ… Task {i}/{len(tasks)} completed") + + print("\nπŸŽ‰ All complex UI navigation tasks completed successfully!") + + except Exception as e: + logger.error(f"Error in complex_ui_navigation: {e}") + traceback.print_exc() + raise + +def main(): + try: + load_dotenv() + + required_vars = [ + "GOOGLE_CLOUD_PROJECT", + "GOOGLE_APPLICATION_CREDENTIALS", + ] + + missing_vars = [var for var in required_vars if not os.environ.get(var)] + if missing_vars: + raise RuntimeError( + f"Missing required environment variables: {', '.join(missing_vars)}\n" + f"Please check your .env file." + ) + + signal.signal(signal.SIGINT, handle_sigint) + + asyncio.run(complex_ui_navigation()) + + except Exception as e: + logger.error(f"Error running automation: {e}") + traceback.print_exc() + +if __name__ == "__main__": + main() +``` + + + + + + + + +### Run Your Script + +Execute your complex UI navigation automation: + +```bash +python gemini_ui_navigation.py +``` + +The agent will: + +1. Navigate to GitHub and locate specific UI elements +2. Distinguish between similar elements (e.g., global search vs. issues search) +3. Perform multi-step interactions with visual feedback +4. Use Gemini 3's advanced reasoning for precise element grounding + +Monitor the output to see the agent's progress through each task. + + + + + +--- + +## Understanding Gemini 3-Specific Parameters + +### `thinking_level` + +Controls the amount of internal reasoning the model performs: + +- `"high"`: Deeper reasoning, better for complex UI navigation (recommended for ScreenSpot-like tasks) +- `"low"`: Faster responses, suitable for simpler tasks + +### `media_resolution` + +Controls vision processing for multimodal inputs: + +- `"high"`: Best for complex UIs with many small elements (recommended) +- `"medium"`: Balanced quality and speed +- `"low"`: Faster processing for simple interfaces + + + For tasks requiring precise GUI element location (like ScreenSpot-Pro), use + `thinking_level="high"` and `media_resolution="high"` for optimal performance. + + +--- + +## Benchmark Performance + +Gemini 3 Pro's performance on ScreenSpot-Pro demonstrates its exceptional UI grounding capabilities: + +| Model | ScreenSpot-Pro Score | +| ----------------- | -------------------- | +| **Gemini 3 Pro** | **72.7%** | +| Claude Sonnet 4.5 | 36.2% | +| Gemini 2.5 Pro | 11.4% | +| GPT-5.1 | 3.5% | + +This makes Gemini 3 the ideal choice for complex UI navigation, element detection, and professional GUI automation tasks. + +--- + +## Troubleshooting + +### Authentication Issues + +If you encounter authentication errors: + +1. Verify your service account JSON key path is correct +2. 
Ensure the service account has the **Vertex AI User** role +3. Check that the Vertex AI API is enabled in your project +4. Confirm your `GOOGLE_CLOUD_PROJECT` matches your actual project ID + +### "Vertex AI API not enabled" Error + +Run this command to enable the API: + +```bash +gcloud services enable aiplatform.googleapis.com --project=YOUR_PROJECT_ID +``` + +### Billing Issues + +Ensure billing is enabled for your Google Cloud project. Visit the [Billing section](https://console.cloud.google.com/billing) to verify. + +--- + +## Next Steps + +- Learn more about [OmniParser agent loops](/agent-sdk/agent-loops) +- Explore [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) +- Read about [ScreenSpot-Pro benchmark](https://github.com/likaixin2000/ScreenSpot-Pro-GUI-Grounding) +- Check out [Google's Gemini 3 announcement](https://blog.google/products/gemini/gemini-3/) +- Join our [Discord community](https://discord.com/invite/mVnXXpdE85) for help diff --git a/docs/content/docs/example-usecases/meta.json b/docs/content/docs/example-usecases/meta.json index 60bba1c3..ca970219 100644 --- a/docs/content/docs/example-usecases/meta.json +++ b/docs/content/docs/example-usecases/meta.json @@ -1,5 +1,10 @@ { - "title": "Example Use Cases", + "title": "Cookbook", "description": "Real-world examples of building with Cua", - "pages": ["form-filling"] + "pages": [ + "windows-app-behind-vpn", + "form-filling", + "post-event-contact-export", + "gemini-complex-ui-navigation" + ] } diff --git a/docs/content/docs/example-usecases/post-event-contact-export.mdx b/docs/content/docs/example-usecases/post-event-contact-export.mdx new file mode 100644 index 00000000..8324f5cd --- /dev/null +++ b/docs/content/docs/example-usecases/post-event-contact-export.mdx @@ -0,0 +1,474 @@ +--- +title: Post-Event Contact Export +description: Run overnight contact extraction from LinkedIn, X, or other social platforms after networking events +--- + +import { Step, Steps } from 'fumadocs-ui/components/steps'; +import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; + +## Overview + +After networking events, you need to export new connections from LinkedIn, X, or other platforms into your CRM. This automation handles it for you. + +**The workflow**: Kick off the script after an event and let it run overnight. Wake up to a clean CSV ready for your CRM or email tool. + +This example focuses on LinkedIn but works across platforms. It uses [Cua Computer](/computer-sdk/computers) to interact with web interfaces and [Agent Loops](/agent-sdk/agent-loops) to iterate through connections with conversation history. + +### Why Cua is Perfect for This + +**Cua's VMs save your session data**, bypassing bot detection entirely: + +- **Log in once manually** through the VM browser +- **Session persists** - you appear as a regular user, not a bot +- **No captchas** - the platform treats automation like normal browsing +- **No login code** - script doesn't handle authentication +- **Run overnight** - kick off and forget + +Traditional web scraping triggers anti-bot measures immediately. Cua's approach works across all platforms. 
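+
+Under the hood, the script is just an agent loop that carries conversation history from one task to the next, so later tasks (such as "click the contact below the previous one") can build on earlier results. Here is a minimal sketch of that pattern; the sandbox name, API key, and task text are placeholders, and the complete runnable script is shown in the steps below:
+
+```python
+from agent import ComputerAgent
+from computer import Computer, VMProviderType
+
+async def run_tasks(tasks: list[str]):
+    # Connect to a sandbox where you have already logged into LinkedIn
+    async with Computer(
+        os_type="linux",
+        provider_type=VMProviderType.CLOUD,
+        name="your-sandbox-name",
+        api_key="sk_cua-api01_...",
+    ) as computer:
+        agent = ComputerAgent(model="cua/anthropic/claude-sonnet-4.5", tools=[computer])
+        history = []  # one shared history keeps earlier tasks in context
+        for task in tasks:
+            history.append({"role": "user", "content": task})
+            async for result in agent.run(history, stream=False):
+                history += result.get("output", [])  # fold the agent's output back into the history
+```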
+ +### What You Get + +The script generates two files with your extracted connections: + +**CSV Export** (`linkedin_connections_20250116_143022.csv`): + +```csv +first,last,role,company,met_at,linkedin +John,Smith,Software Engineer,Acme Corp,Google Devfest Toronto,https://www.linkedin.com/in/johnsmith +Sarah,Johnson,Product Manager,Tech Inc,Google Devfest Toronto,https://www.linkedin.com/in/sarahjohnson +``` + +**Messaging Links** (`linkedin_messaging_links_20250116_143022.txt`): + +``` +LinkedIn Messaging Compose Links +================================================================================ + +1. https://www.linkedin.com/messaging/compose/?recipient=johnsmith +2. https://www.linkedin.com/messaging/compose/?recipient=sarahjohnson +``` + +--- + + + + + +### Set Up Your Environment + +First, install the required dependencies: + +Create a `requirements.txt` file: + +```text +cua-agent +cua-computer +python-dotenv>=1.0.0 +``` + +Install the dependencies: + +```bash +pip install -r requirements.txt +``` + +Create a `.env` file with your API keys: + +```text +ANTHROPIC_API_KEY=your-anthropic-api-key +CUA_API_KEY=sk_cua-api01... +CUA_CONTAINER_NAME=m-linux-... +``` + + + + + +### Log Into LinkedIn Manually + +**Important**: Before running the script, manually log into LinkedIn through your VM: + +1. Access your VM through the Cua dashboard +2. Open a browser and navigate to LinkedIn +3. Log in with your credentials (handle any captchas manually) +4. Close the browser but leave the VM running +5. Your session is now saved and ready for automation! + +This one-time manual login bypasses all bot detection. + + + + + +### Configure and Create Your Script + +Create a Python file (e.g., `contact_export.py`). You can customize: + +```python +# Where you met these connections (automatically added to CSV) +MET_AT_REASON = "Google Devfest Toronto" + +# Number of contacts to extract (in the main loop) +for contact_num in range(1, 21): # Change 21 to extract more/fewer contacts +``` + +Select your environment: + + + + +```python +import asyncio +import csv +import logging +import os +import signal +import traceback +from datetime import datetime + +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Configuration: Define where you met these connections +MET_AT_REASON = "Google Devfest Toronto" + +def handle_sigint(sig, frame): + print("\n\nExecution interrupted by user. Exiting gracefully...") + exit(0) + +def extract_public_id_from_linkedin_url(linkedin_url): + """Extract public ID from LinkedIn profile URL.""" + if not linkedin_url: + return None + + url = linkedin_url.split('?')[0].rstrip('/') + + if '/in/' in url: + public_id = url.split('/in/')[-1] + return public_id + + return None + +def extract_contact_from_response(result_output): + """ + Extract contact information from agent's response. 
+ Expects format: + FIRST: value + LAST: value + ROLE: value + COMPANY: value + LINKEDIN: value + """ + contact = { + 'first': '', + 'last': '', + 'role': '', + 'company': '', + 'met_at': MET_AT_REASON, + 'linkedin': '' + } + + for item in result_output: + if item.get("type") == "message": + content = item.get("content", []) + for content_part in content: + text = content_part.get("text", "") + if text: + for line in text.split('\n'): + line = line.strip() + line_upper = line.upper() + + if line_upper.startswith("FIRST:"): + value = line[6:].strip() + if value and value.upper() != "N/A": + contact['first'] = value + elif line_upper.startswith("LAST:"): + value = line[5:].strip() + if value and value.upper() != "N/A": + contact['last'] = value + elif line_upper.startswith("ROLE:"): + value = line[5:].strip() + if value and value.upper() != "N/A": + contact['role'] = value + elif line_upper.startswith("COMPANY:"): + value = line[8:].strip() + if value and value.upper() != "N/A": + contact['company'] = value + elif line_upper.startswith("LINKEDIN:"): + value = line[9:].strip() + if value and value.upper() != "N/A": + contact['linkedin'] = value + + return contact + +async def scrape_linkedin_connections(): + """Scrape LinkedIn connections and export to CSV.""" + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + csv_filename = f"linkedin_connections_{timestamp}.csv" + csv_path = os.path.join(os.getcwd(), csv_filename) + + # Initialize CSV file + with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=['first', 'last', 'role', 'company', 'met_at', 'linkedin']) + writer.writeheader() + + print(f"\nπŸš€ Starting LinkedIn connections scraper") + print(f"πŸ“ Output file: {csv_path}") + print(f"πŸ“ Met at: {MET_AT_REASON}") + print("=" * 80) + + try: + async with Computer( + os_type="linux", + provider_type=VMProviderType.CLOUD, + name=os.environ["CUA_CONTAINER_NAME"], # Your sandbox name + api_key=os.environ["CUA_API_KEY"], + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=True, + max_trajectory_budget=10.0, + ) + + history = [] + + # Task 1: Navigate to LinkedIn connections page + navigation_task = ( + "STEP 1 - NAVIGATE TO LINKEDIN CONNECTIONS PAGE:\n" + "1. Open a web browser (Chrome or Firefox)\n" + "2. Navigate to https://www.linkedin.com/mynetwork/invite-connect/connections/\n" + "3. Wait for the page to fully load\n" + "4. Confirm you can see the list of connections\n" + "5. Ready to start extracting contacts" + ) + + print(f"\n[Task 1/21] Navigating to LinkedIn...") + history.append({"role": "user", "content": navigation_task}) + + async for result in agent.run(history, stream=False): + history += result.get("output", []) + + print(f"βœ… Navigation completed\n") + + # Extract 20 contacts + contacts_extracted = 0 + linkedin_urls = [] + previous_contact_name = None + + for contact_num in range(1, 21): + # Build extraction task + if contact_num == 1: + extraction_task = ( + f"STEP {contact_num + 1} - EXTRACT CONTACT {contact_num} OF 20:\n" + f"1. Click on the first connection's profile\n" + f"2. Extract: FIRST, LAST, ROLE, COMPANY, LINKEDIN URL\n" + f"3. Return in exact format:\n" + f"FIRST: [value]\n" + f"LAST: [value]\n" + f"ROLE: [value]\n" + f"COMPANY: [value]\n" + f"LINKEDIN: [value]\n" + f"4. 
Navigate back to connections list" + ) + else: + extraction_task = ( + f"STEP {contact_num + 1} - EXTRACT CONTACT {contact_num} OF 20:\n" + f"1. Find '{previous_contact_name}' in the list\n" + f"2. Click on the contact BELOW them\n" + f"3. Extract: FIRST, LAST, ROLE, COMPANY, LINKEDIN URL\n" + f"4. Return in exact format:\n" + f"FIRST: [value]\n" + f"LAST: [value]\n" + f"ROLE: [value]\n" + f"COMPANY: [value]\n" + f"LINKEDIN: [value]\n" + f"5. Navigate back" + ) + + print(f"[Task {contact_num + 1}/21] Extracting contact {contact_num}/20...") + history.append({"role": "user", "content": extraction_task}) + + all_output = [] + async for result in agent.run(history, stream=False): + output = result.get("output", []) + history += output + all_output.extend(output) + + contact_data = extract_contact_from_response(all_output) + + has_name = bool(contact_data['first'] and contact_data['last']) + has_linkedin = bool(contact_data['linkedin'] and 'linkedin.com' in contact_data['linkedin']) + + if has_name or has_linkedin: + with open(csv_path, 'a', newline='', encoding='utf-8') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=['first', 'last', 'role', 'company', 'met_at', 'linkedin']) + writer.writerow(contact_data) + contacts_extracted += 1 + + if contact_data['linkedin']: + linkedin_urls.append(contact_data['linkedin']) + + if has_name: + previous_contact_name = f"{contact_data['first']} {contact_data['last']}".strip() + + name_str = f"{contact_data['first']} {contact_data['last']}" if has_name else "[No name]" + print(f"βœ… Contact {contact_num}/20 saved: {name_str}") + else: + print(f"⚠️ Could not extract valid data for contact {contact_num}") + + if contact_num % 5 == 0: + print(f"\nπŸ“ˆ Progress: {contacts_extracted}/{contact_num} contacts extracted\n") + + # Create messaging links file + messaging_filename = f"linkedin_messaging_links_{timestamp}.txt" + messaging_path = os.path.join(os.getcwd(), messaging_filename) + + with open(messaging_path, 'w', encoding='utf-8') as txtfile: + txtfile.write("LinkedIn Messaging Compose Links\n") + txtfile.write("=" * 80 + "\n\n") + + for i, linkedin_url in enumerate(linkedin_urls, 1): + public_id = extract_public_id_from_linkedin_url(linkedin_url) + if public_id: + messaging_url = f"https://www.linkedin.com/messaging/compose/?recipient={public_id}" + txtfile.write(f"{i}. 
{messaging_url}\n") + + print("\n" + "="*80) + print("πŸŽ‰ All tasks completed!") + print(f"πŸ“ CSV file saved to: {csv_path}") + print(f"πŸ“Š Total contacts extracted: {contacts_extracted}/20") + print(f"πŸ’¬ Messaging links saved to: {messaging_path}") + print("="*80) + + except Exception as e: + print(f"\n❌ Error: {e}") + traceback.print_exc() + raise + +def main(): + try: + load_dotenv() + + if "ANTHROPIC_API_KEY" not in os.environ: + raise RuntimeError("Please set ANTHROPIC_API_KEY in .env") + + if "CUA_API_KEY" not in os.environ: + raise RuntimeError("Please set CUA_API_KEY in .env") + + if "CUA_CONTAINER_NAME" not in os.environ: + raise RuntimeError("Please set CUA_CONTAINER_NAME in .env") + + signal.signal(signal.SIGINT, handle_sigint) + + asyncio.run(scrape_linkedin_connections()) + + except Exception as e: + print(f"\n❌ Error: {e}") + traceback.print_exc() + +if __name__ == "__main__": + main() +``` + + + + +```python +# Same code as Cloud Sandbox, but change Computer initialization to: +async with Computer( + os_type="linux", + provider_type=VMProviderType.DOCKER, + image="trycua/cua-xfce:latest", + verbosity=logging.INFO, +) as computer: +``` + +And remove the `CUA_API_KEY` and `CUA_CONTAINER_NAME` requirements from `.env` and the validation checks. + + + + +```python +# Same code as Cloud Sandbox, but change Computer initialization to: +async with Computer( + os_type="macos", + provider_type=VMProviderType.LUME, + name="macos-sequoia-cua:latest", + verbosity=logging.INFO, +) as computer: +``` + +And remove the `CUA_API_KEY` and `CUA_CONTAINER_NAME` requirements from `.env` and the validation checks. + + + + +```python +# Same code as Cloud Sandbox, but change Computer initialization to: +async with Computer( + os_type="windows", + provider_type=VMProviderType.WINDOWS_SANDBOX, + verbosity=logging.INFO, +) as computer: +``` + +And remove the `CUA_API_KEY` and `CUA_CONTAINER_NAME` requirements from `.env` and the validation checks. + + + + + + + + +### Run Your Script + +Execute your contact extraction automation: + +```bash +python contact_export.py +``` + +The agent will: + +1. Navigate to your LinkedIn connections page +2. Extract data from 20 contacts (first name, last name, role, company, LinkedIn URL) +3. Save contacts to a timestamped CSV file +4. Generate messaging compose links for easy follow-up + +Monitor the output to see the agent's progress. The script will show a progress update every 5 contacts. + + + + + +--- + +## How It Works + +This script demonstrates a practical workflow for extracting LinkedIn connection data: + +1. **Session Persistence** - Manually log into LinkedIn through the VM once, and the VM saves your session +2. **Navigation** - The script navigates to your connections page using your saved authenticated session +3. **Data Extraction** - For each contact, the agent clicks their profile, extracts data, and navigates back +4. **Python Processing** - Python parses responses, validates data, and writes to CSV incrementally +5. 
**Output Files** - Generates a CSV with contact data and a text file with messaging URLs + +## Next Steps + +- Learn more about [Cua computers](/computer-sdk/computers) and [computer commands](/computer-sdk/commands) +- Read about [Agent loops](/agent-sdk/agent-loops), [tools](/agent-sdk/custom-tools), and [supported model providers](/agent-sdk/supported-model-providers/) +- Experiment with different [Models and Providers](/agent-sdk/supported-model-providers/) +- Adapt this script for other platforms (Twitter/X, email extraction, etc.) +- Join our [Discord community](https://discord.com/invite/mVnXXpdE85) for help diff --git a/docs/content/docs/example-usecases/windows-app-behind-vpn.mdx b/docs/content/docs/example-usecases/windows-app-behind-vpn.mdx new file mode 100644 index 00000000..3e910987 --- /dev/null +++ b/docs/content/docs/example-usecases/windows-app-behind-vpn.mdx @@ -0,0 +1,629 @@ +--- +title: Windows App behind VPN +description: Automate legacy Windows desktop applications behind VPN with Cua +--- + +import { Step, Steps } from 'fumadocs-ui/components/steps'; +import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; + +## Overview + +This guide demonstrates how to automate Windows desktop applications (like eGecko HR/payroll systems) that run behind corporate VPN. This is a common enterprise scenario where legacy desktop applications require manual data entry, report generation, or workflow execution. + +**Use cases:** + +- HR/payroll processing (employee onboarding, payroll runs, benefits administration) +- Desktop ERP systems behind corporate networks +- Legacy financial applications requiring VPN access +- Compliance reporting from on-premise systems + +**Architecture:** + +- Client-side Cua agent (Python SDK or Playground UI) +- Windows VM/Sandbox with VPN client configured +- RDP/remote desktop connection to target environment +- Desktop application automation via computer vision and UI control + + + **Production Deployment**: For production use, consider workflow mining and custom finetuning to + create vertical-specific actions (e.g., "Run payroll", "Onboard employee") instead of generic UI + automation. This provides better audit trails and higher success rates. + + +--- + +## Video Demo + +
+ +
+ Demo showing Cua automating an eGecko-like desktop application on Windows behind AWS VPN +
+
+ +--- + + + + + +### Set Up Your Environment + +Install the required dependencies: + +Create a `requirements.txt` file: + +```text +cua-agent +cua-computer +python-dotenv>=1.0.0 +``` + +Install the dependencies: + +```bash +pip install -r requirements.txt +``` + +Create a `.env` file with your API keys: + +```text +ANTHROPIC_API_KEY=your-anthropic-api-key +CUA_API_KEY=sk_cua-api01... +CUA_SANDBOX_NAME=your-windows-sandbox +``` + + + + + +### Configure Windows Sandbox with VPN + + + + +For enterprise deployments, use Cua Cloud Sandbox with pre-configured VPN: + +1. Go to [cua.ai/signin](https://cua.ai/signin) +2. Navigate to **Dashboard > Containers > Create Instance** +3. Create a **Windows** sandbox (Medium or Large for desktop apps) +4. Configure VPN settings: + - Upload your AWS VPN Client configuration (`.ovpn` file) + - Or configure VPN credentials directly in the dashboard +5. Note your sandbox name and API key + +Your Windows sandbox will launch with VPN automatically connected. + + + + +For local development on Windows 10 Pro/Enterprise or Windows 11: + +1. Enable [Windows Sandbox](https://learn.microsoft.com/en-us/windows/security/application-security/application-isolation/windows-sandbox/windows-sandbox-install) +2. Install the `pywinsandbox` dependency: + ```bash + pip install -U git+git://github.com/karkason/pywinsandbox.git + ``` +3. Create a VPN setup script that runs on sandbox startup +4. Configure your desktop application installation within the sandbox + + + **Manual VPN Setup**: Windows Sandbox requires manual VPN configuration each time it starts. For + production use, consider Cloud Sandbox or self-hosted VMs with persistent VPN connections. + + + + + +For self-managed infrastructure: + +1. Deploy Windows VM on your preferred cloud (AWS, Azure, GCP) +2. Install and configure VPN client (AWS VPN Client, OpenVPN, etc.) +3. Install target desktop application and any dependencies +4. Install `cua-computer-server`: + ```bash + pip install cua-computer-server + python -m computer_server + ``` +5. Configure firewall rules to allow Cua agent connections + + + + + + + + +### Create Your Automation Script + +Create a Python file (e.g., `hr_automation.py`): + + + + +```python +import asyncio +import logging +import os +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +load_dotenv() + +async def automate_hr_workflow(): + """ + Automate HR/payroll desktop application workflow. + + This example demonstrates: + - Launching Windows desktop application + - Navigating complex desktop UI + - Data entry and form filling + - Report generation and export + """ + try: + # Connect to Windows Cloud Sandbox with VPN + async with Computer( + os_type="windows", + provider_type=VMProviderType.CLOUD, + name=os.environ["CUA_SANDBOX_NAME"], + api_key=os.environ["CUA_API_KEY"], + verbosity=logging.INFO, + ) as computer: + + # Configure agent with specialized instructions + agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=True, + max_trajectory_budget=10.0, + instructions=""" +You are automating a Windows desktop HR/payroll application. 
+ +IMPORTANT GUIDELINES: +- Always wait for windows and dialogs to fully load before interacting +- Look for loading indicators and wait for them to disappear +- Verify each action by checking on-screen confirmation messages +- If a button or field is not visible, try scrolling or navigating tabs +- Desktop apps often have nested menus - explore systematically +- Save work frequently using File > Save or Ctrl+S +- Before closing, always verify changes were saved + +COMMON UI PATTERNS: +- Menu bar navigation (File, Edit, View, etc.) +- Ribbon interfaces with tabs +- Modal dialogs that block interaction +- Data grids/tables for viewing records +- Form fields with validation +- Status bars showing operation progress + """.strip() + ) + + # Define workflow tasks + tasks = [ + "Launch the HR application from the desktop or start menu", + "Log in with the credentials shown in credentials.txt on the desktop", + "Navigate to Employee Management section", + "Create a new employee record with information from new_hire.xlsx on desktop", + "Verify the employee was created successfully by searching for their name", + "Generate an onboarding report for the new employee", + "Export the report as PDF to the desktop", + "Log out of the application" + ] + + history = [] + + for task in tasks: + logger.info(f"\n{'='*60}") + logger.info(f"Task: {task}") + logger.info(f"{'='*60}\n") + + history.append({"role": "user", "content": task}) + + async for result in agent.run(history): + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for block in content: + if block.get("type") == "text": + response = block.get("text", "") + logger.info(f"Agent: {response}") + history.append({"role": "assistant", "content": response}) + + logger.info("\nTask completed. Moving to next task...\n") + + logger.info("\n" + "="*60) + logger.info("All tasks completed successfully!") + logger.info("="*60) + + except Exception as e: + logger.error(f"Error during automation: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + asyncio.run(automate_hr_workflow()) +``` + + + + +```python +import asyncio +import logging +import os +from agent import ComputerAgent +from computer import Computer, VMProviderType +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +load_dotenv() + +async def automate_hr_workflow(): + try: + # Connect to Windows Sandbox + async with Computer( + os_type="windows", + provider_type=VMProviderType.WINDOWS_SANDBOX, + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=True, + max_trajectory_budget=10.0, + instructions=""" +You are automating a Windows desktop HR/payroll application. 
+ +IMPORTANT GUIDELINES: +- Always wait for windows and dialogs to fully load before interacting +- Verify each action by checking on-screen confirmation messages +- Desktop apps often have nested menus - explore systematically +- Save work frequently using File > Save or Ctrl+S + """.strip() + ) + + tasks = [ + "Launch the HR application from the desktop", + "Log in with credentials from credentials.txt on desktop", + "Navigate to Employee Management and create new employee from new_hire.xlsx", + "Generate and export onboarding report as PDF", + "Log out of the application" + ] + + history = [] + + for task in tasks: + logger.info(f"\nTask: {task}") + history.append({"role": "user", "content": task}) + + async for result in agent.run(history): + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for block in content: + if block.get("type") == "text": + response = block.get("text", "") + logger.info(f"Agent: {response}") + history.append({"role": "assistant", "content": response}) + + logger.info("\nAll tasks completed!") + + except Exception as e: + logger.error(f"Error: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + asyncio.run(automate_hr_workflow()) +``` + + + + +```python +import asyncio +import logging +import os +from agent import ComputerAgent +from computer import Computer +from dotenv import load_dotenv + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +load_dotenv() + +async def automate_hr_workflow(): + try: + # Connect to self-hosted Windows VM running computer-server + async with Computer( + use_host_computer_server=True, + base_url="http://your-windows-vm-ip:5757", # Update with your VM IP + verbosity=logging.INFO, + ) as computer: + + agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + only_n_most_recent_images=3, + verbosity=logging.INFO, + trajectory_dir="trajectories", + use_prompt_caching=True, + max_trajectory_budget=10.0, + instructions=""" +You are automating a Windows desktop HR/payroll application. + +IMPORTANT GUIDELINES: +- Always wait for windows and dialogs to fully load before interacting +- Verify each action by checking on-screen confirmation messages +- Save work frequently using File > Save or Ctrl+S + """.strip() + ) + + tasks = [ + "Launch the HR application", + "Log in with provided credentials", + "Complete the required HR workflow", + "Generate and export report", + "Log out" + ] + + history = [] + + for task in tasks: + logger.info(f"\nTask: {task}") + history.append({"role": "user", "content": task}) + + async for result in agent.run(history): + for item in result.get("output", []): + if item.get("type") == "message": + content = item.get("content", []) + for block in content: + if block.get("type") == "text": + response = block.get("text", "") + logger.info(f"Agent: {response}") + history.append({"role": "assistant", "content": response}) + + logger.info("\nAll tasks completed!") + + except Exception as e: + logger.error(f"Error: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + asyncio.run(automate_hr_workflow()) +``` + + + + + + + + +### Run Your Automation + +Execute the script: + +```bash +python hr_automation.py +``` + +The agent will: + +1. Connect to your Windows environment (with VPN if configured) +2. Launch and navigate the desktop application +3. Execute each workflow step sequentially +4. Verify actions and handle errors +5. 
Save trajectory logs for audit and debugging + +Monitor the console output to see the agent's progress through each task. + + + + + +--- + +## Key Configuration Options + +### Agent Instructions + +The `instructions` parameter is critical for reliable desktop automation: + +```python +instructions=""" +You are automating a Windows desktop HR/payroll application. + +IMPORTANT GUIDELINES: +- Always wait for windows and dialogs to fully load before interacting +- Look for loading indicators and wait for them to disappear +- Verify each action by checking on-screen confirmation messages +- If a button or field is not visible, try scrolling or navigating tabs +- Desktop apps often have nested menus - explore systematically +- Save work frequently using File > Save or Ctrl+S +- Before closing, always verify changes were saved + +COMMON UI PATTERNS: +- Menu bar navigation (File, Edit, View, etc.) +- Ribbon interfaces with tabs +- Modal dialogs that block interaction +- Data grids/tables for viewing records +- Form fields with validation +- Status bars showing operation progress + +APPLICATION-SPECIFIC: +- Login is at top-left corner +- Employee records are under "HR Management" > "Employees" +- Reports are generated via "Tools" > "Reports" > "Generate" +- Always click "Save" before navigating away from a form +""".strip() +``` + +### Budget Management + +For long-running workflows, adjust budget limits: + +```python +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + max_trajectory_budget=20.0, # Increase for complex workflows + # ... other params +) +``` + +### Image Retention + +Balance context and cost by retaining only recent screenshots: + +```python +agent = ComputerAgent( + # ... + only_n_most_recent_images=3, # Keep last 3 screenshots + # ... +) +``` + +--- + +## Production Considerations + + + For enterprise production deployments, consider these additional steps: + + +### 1. Workflow Mining + +Before deploying, analyze your actual workflows: + +- Record user interactions with the application +- Identify common patterns and edge cases +- Map out decision trees and validation requirements +- Document application-specific quirks and timing issues + +### 2. Custom Finetuning + +Create vertical-specific actions instead of generic UI automation: + +```python +# Instead of generic steps: +tasks = ["Click login", "Type username", "Type password", "Click submit"] + +# Create semantic actions: +tasks = ["onboard_employee", "run_payroll", "generate_compliance_report"] +``` + +This provides: + +- Better audit trails +- Approval gates at business logic level +- Higher success rates +- Easier maintenance and updates + +### 3. Human-in-the-Loop + +Add approval gates for critical operations: + +```python +agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", + tools=[computer], + # Add human approval callback for sensitive operations + callbacks=[ApprovalCallback(require_approval_for=["payroll", "termination"])] +) +``` + +### 4. 
Deployment Options + +Choose your deployment model: + +**Managed (Recommended)** + +- Cua hosts Windows sandboxes, VPN/RDP stack, and agent runtime +- You get UI/API endpoints for triggering workflows +- Automatic scaling, monitoring, and maintenance +- SLA guarantees and enterprise support + +**Self-Hosted** + +- You manage Windows VMs, VPN infrastructure, and agent deployment +- Full control over data and security +- Custom network configurations +- On-premise or your preferred cloud + +--- + +## Troubleshooting + +### VPN Connection Issues + +If the agent cannot reach the application: + +1. Verify VPN is connected: Check VPN client status in the Windows sandbox +2. Test network connectivity: Try pinging internal resources +3. Check firewall rules: Ensure RDP and application ports are open +4. Review VPN logs: Look for authentication or routing errors + +### Application Not Launching + +If the desktop application fails to start: + +1. Verify installation: Check the application is installed in the sandbox +2. Check dependencies: Ensure all required DLLs and frameworks are present +3. Review permissions: Application may require admin rights +4. Check logs: Look for error messages in Windows Event Viewer + +### UI Element Not Found + +If the agent cannot find buttons or fields: + +1. Increase wait times: Some applications load slowly +2. Check screen resolution: UI elements may be off-screen +3. Verify DPI scaling: High DPI settings can affect element positions +4. Update instructions: Provide more specific navigation guidance + +### Cost Management + +If costs are higher than expected: + +1. Reduce `max_trajectory_budget` +2. Decrease `only_n_most_recent_images` +3. Use prompt caching: Set `use_prompt_caching=True` +4. Optimize task descriptions: Be more specific to reduce retry attempts + +--- + +## Next Steps + +- **Explore custom tools**: Learn how to create [custom tools](/agent-sdk/custom-tools) for application-specific actions +- **Implement callbacks**: Add [monitoring and logging](/agent-sdk/callbacks) for production workflows +- **Join community**: Get help in our [Discord](https://discord.com/invite/mVnXXpdE85) + +--- + +## Related Examples + +- [Form Filling](/example-usecases/form-filling) - Web form automation +- [Post-Event Contact Export](/example-usecases/post-event-contact-export) - Data extraction workflows +- [Custom Tools](/agent-sdk/custom-tools) - Building application-specific functions diff --git a/docs/content/docs/get-started/meta.json b/docs/content/docs/get-started/meta.json new file mode 100644 index 00000000..a14e8acb --- /dev/null +++ b/docs/content/docs/get-started/meta.json @@ -0,0 +1,7 @@ +{ + "title": "Get Started", + "description": "Get started with Cua", + "defaultOpen": true, + "icon": "Rocket", + "pages": ["../index", "quickstart"] +} diff --git a/docs/content/docs/get-started/quickstart.mdx b/docs/content/docs/get-started/quickstart.mdx new file mode 100644 index 00000000..f342b4ce --- /dev/null +++ b/docs/content/docs/get-started/quickstart.mdx @@ -0,0 +1,571 @@ +--- +title: Quickstart +description: Get started with Cua +--- + +import { Step, Steps } from 'fumadocs-ui/components/steps'; +import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; +import { Accordion, Accordions } from 'fumadocs-ui/components/accordion'; +import { Code, Terminal } from 'lucide-react'; + +{/* Choose your quickstart path: + +
+ } href="#developer-quickstart" title="Developer Quickstart"> + Build with Python or TypeScript SDKs - full programmatic control + + } href="#cli-quickstart" title="CLI Quickstart"> + Get started quickly with the command-line interface + +
*/} + +--- + +## Set Up Your Computer Environment + +Choose how you want to run your Cua computer. This will be the environment where your automated tasks will execute. + +You can run your Cua computer in the cloud (recommended for easiest setup), locally on macOS with Lume, locally on Windows with a Windows Sandbox, or in a Docker container on any platform. Choose the option that matches your system and needs. + + + + + Create and manage cloud sandboxes that run Linux (Ubuntu), Windows, or macOS. + + **First, create your API key:** + + 1. Go to [cua.ai/signin](https://cua.ai/signin) + 2. Navigate to **Dashboard > API Keys > New API Key** to create your API key + 3. **Important:** Copy and save your API key immediately - you won't be able to see it again (you'll need to regenerate if lost) + + **Then, create your sandbox using either option:** + + **Option 1: Via Website** + + 1. Navigate to **Dashboard > Sandboxes > Create Sandbox** + 2. Create a **Small** sandbox, choosing **Linux**, **Windows**, or **macOS** + 3. Note your sandbox name + + **Option 2: Via CLI** + + 1. Install the CUA CLI: + ```bash + # macOS/Linux + curl -LsSf https://cua.ai/cli/install.sh | sh + + # Windows + powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" + ``` + + 2. Login and create a sandbox: + ```bash + cua auth login + cua sb create --os linux --size small --region north-america + ``` + + 3. Note your sandbox name and password from the output + + Your Cloud Sandbox will be automatically configured and ready to use. + + + + + Run Linux desktop locally on macOS, Windows, or Linux hosts. + + 1. Install Docker Desktop or Docker Engine + + 2. Pull a CUA Docker image: + + ```bash + # XFCE (Lightweight) - recommended for most use cases + docker pull --platform=linux/amd64 trycua/cua-xfce:latest + + # OR KASM (Full-Featured) - full Ubuntu desktop + docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest + ``` + + + + + macOS hosts only - requires Lume CLI. + + 1. Install the Lume CLI: + + ```bash + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" + ``` + + 2. Start a local Cua sandbox: + + ```bash + lume run macos-sequoia-cua:latest + ``` + + + + + Windows hosts only - requires Windows 10 Pro/Enterprise or Windows 11. + + 1. Enable [Windows Sandbox](https://learn.microsoft.com/en-us/windows/security/application-security/application-isolation/windows-sandbox/windows-sandbox-install) + 2. Install the `pywinsandbox` dependency: + + ```bash + pip install -U git+git://github.com/karkason/pywinsandbox.git + ``` + + 3. Windows Sandbox will be automatically configured when you run the CLI + + + + +--- + +## Developer Quickstart + + + + + +### Using Computer + +Connect to your Cua computer and perform basic interactions, such as taking screenshots or simulating user input. + + + + Install the Cua computer Python SDK: + ```bash + pip install cua-computer + ``` + + Then, connect to your desired computer environment: + + + + Set your CUA API key (same key used for model inference) and connect to your sandbox: + ```python + import os + from computer import Computer + + os.environ["CUA_API_KEY"] = "sk_cua-api01_..." 
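+    # Tip: hard-coding the key is only for illustration; in a real project, load it from a .env file or your shell environment instead.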
+ + computer = Computer( + os_type="linux", # or "windows" or "macos" + provider_type="cloud", + name="your-sandbox-name" # from CLI or website + ) + await computer.run() # Connect to the sandbox + ``` + + + ```python + from computer import Computer + + computer = Computer( + os_type="linux", + provider_type="docker", + image="trycua/cua-xfce:latest" # or "trycua/cua-ubuntu:latest" + ) + await computer.run() # Launch & connect to the sandbox + ``` + + + ```python + from computer import Computer + + computer = Computer( + os_type="macos", + provider_type="lume", + name="macos-sequoia-cua:latest" + ) + await computer.run() # Launch & connect to the sandbox + ``` + + + ```python + from computer import Computer + + computer = Computer( + os_type="windows", + provider_type="windows_sandbox" + ) + await computer.run() # Launch & connect to the sandbox + ``` + + + Install and run `cua-computer-server`: + ```bash + pip install cua-computer-server + python -m computer_server + ``` + + Then, use the `Computer` object to connect: + ```python + from computer import Computer + + computer = Computer(use_host_computer_server=True) + await computer.run() # Connect to the host desktop + ``` + + + + Once connected, you can perform interactions: + ```python + try: + # Take a screenshot of the computer's current display + screenshot = await computer.interface.screenshot() + # Simulate a left-click at coordinates (100, 100) + await computer.interface.left_click(100, 100) + # Type "Hello!" into the active application + await computer.interface.type_text("Hello!") + finally: + await computer.close() + ``` + + + + + The TypeScript interface is currently deprecated. We're working on version 0.2.0 with improved TypeScript support. In the meantime, please use the Python SDK. + + + Install the Cua computer TypeScript SDK: + ```bash + npm install @trycua/computer + ``` + + Then, connect to your desired computer environment: + + + + Set your CUA API key (same key used for model inference): + ```bash + export CUA_API_KEY="sk_cua-api01_..." 
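+  # The value above is a placeholder - substitute the API key from Dashboard > API Keys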
+ ``` + + Then connect to your sandbox: + ```typescript + import { Computer, OSType } from '@trycua/computer'; + + const computer = new Computer({ + osType: OSType.LINUX, // or OSType.WINDOWS or OSType.MACOS + name: "your-sandbox-name" // from CLI or website + }); + await computer.run(); // Connect to the sandbox + ``` + + + ```typescript + import { Computer, OSType, ProviderType } from '@trycua/computer'; + + const computer = new Computer({ + osType: OSType.LINUX, + providerType: ProviderType.DOCKER, + image: "trycua/cua-xfce:latest" // or "trycua/cua-ubuntu:latest" + }); + await computer.run(); // Launch & connect to the sandbox + ``` + + + ```typescript + import { Computer, OSType, ProviderType } from '@trycua/computer'; + + const computer = new Computer({ + osType: OSType.MACOS, + providerType: ProviderType.LUME, + name: "macos-sequoia-cua:latest" + }); + await computer.run(); // Launch & connect to the sandbox + ``` + + + ```typescript + import { Computer, OSType, ProviderType } from '@trycua/computer'; + + const computer = new Computer({ + osType: OSType.WINDOWS, + providerType: ProviderType.WINDOWS_SANDBOX + }); + await computer.run(); // Launch & connect to the sandbox + ``` + + + First, install and run `cua-computer-server`: + ```bash + pip install cua-computer-server + python -m computer_server + ``` + + Then, use the `Computer` object to connect: + ```typescript + import { Computer } from '@trycua/computer'; + + const computer = new Computer({ useHostComputerServer: true }); + await computer.run(); // Connect to the host desktop + ``` + + + + Once connected, you can perform interactions: + ```typescript + try { + // Take a screenshot of the computer's current display + const screenshot = await computer.interface.screenshot(); + // Simulate a left-click at coordinates (100, 100) + await computer.interface.leftClick(100, 100); + // Type "Hello!" into the active application + await computer.interface.typeText("Hello!"); + } finally { + await computer.close(); + } + ``` + + + + +Learn more about computers in the [Cua computers documentation](/computer-sdk/computers). You will see how to automate computers with agents in the next step. + + + + + +### Using Agent + +Utilize an Agent to automate complex tasks by providing it with a goal and allowing it to interact with the computer environment. + +Install the Cua agent Python SDK: + +```bash +pip install "cua-agent[all]" +``` + +Choose how you want to access vision-language models for your agent: + + + + + Use CUA's inference API to access multiple model providers with a single API key (same key used for sandbox access). CUA VLM Router provides intelligent routing and cost optimization. + + **Use the agent with CUA models:** + ```python + import os + from agent import ComputerAgent + + os.environ["CUA_API_KEY"] = "sk_cua-api01_..." 
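+    # Placeholder key - this is the same CUA API key used for sandbox access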
+ + agent = ComputerAgent( + model="cua/anthropic/claude-sonnet-4.5", # CUA-routed model + tools=[computer], + max_trajectory_budget=5.0 + ) + + messages = [{"role": "user", "content": "Take a screenshot and tell me what you see"}] + + async for result in agent.run(messages): + for item in result["output"]: + if item["type"] == "message": + print(item["content"][0]["text"]) + ``` + + **Available CUA models:** + - `cua/anthropic/claude-sonnet-4.5` - Claude Sonnet 4.5 (recommended) + - `cua/anthropic/claude-opus-4.5` - Claude Opus 4.5 (enhanced agentic capabilities) + - `cua/anthropic/claude-haiku-4.5` - Claude Haiku 4.5 (faster, cost-effective) + - `cua/qwen/qwen3-vl-235b` - Qwen3 VL 235B (large-scale vision-language tasks) + + **Benefits:** + - Single API key for multiple providers + - Cost tracking and optimization + - No need to manage multiple provider keys + + + + + Use your own API keys from model providers like Anthropic, OpenAI, or others. + + **Use the agent with your provider:** + ```python + import os + from agent import ComputerAgent + + # Set your provider API key + os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..." # For Anthropic + # OR + os.environ["OPENAI_API_KEY"] = "sk-..." # For OpenAI + + agent = ComputerAgent( + model="anthropic/claude-sonnet-4-5-20250929", # Direct provider model + tools=[computer], + max_trajectory_budget=5.0 + ) + + messages = [{"role": "user", "content": "Take a screenshot and tell me what you see"}] + + async for result in agent.run(messages): + for item in result["output"]: + if item["type"] == "message": + print(item["content"][0]["text"]) + ``` + + **Supported providers:** + - `anthropic/claude-*` - Anthropic Claude models + - `openai/gpt-*` - OpenAI GPT models + - `openai/o1-*` - OpenAI o1 models + - `huggingface-local/*` - Local HuggingFace models + - And many more via LiteLLM + + See [Supported Models](/agent-sdk/supported-model-providers/) for the complete list. + + + + +Learn more about agents in [Agent Loops](/agent-sdk/agent-loops) and available models in [Supported Models](/agent-sdk/supported-model-providers/). + + + + +### Next Steps + +- Learn more about [Cua computers](/computer-sdk/computers) and [computer commands](/computer-sdk/commands) +- Read about [Agent loops](/agent-sdk/agent-loops), [tools](/agent-sdk/custom-tools), and [supported model providers](/agent-sdk/supported-model-providers/) +- Join our [Discord community](https://discord.com/invite/mVnXXpdE85) for help +- Try out [Form Filling](/example-usecases/form-filling) preset usecase + +{/* --- + +## CLI Quickstart + +Get started quickly with the CUA CLI - the easiest way to manage cloud sandboxes and run AI agents. 
+ + + + +### Install the CUA CLI + + + + ```bash + curl -LsSf https://cua.ai/cli/install.sh | sh + ``` + + + ```powershell + powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" + ``` + + + ```bash + # Install Bun if you don't have it + curl -fsSL https://bun.sh/install | bash + + # Install CUA CLI + bun add -g @trycua/cli + ``` + + + ```bash + # Install Bun (macOS/Linux) + curl -fsSL https://bun.sh/install | bash + + # Install Bun (Windows) + # powershell -c "irm bun.sh/install.ps1|iex" + + # Clone the repo + git clone https://github.com/trycua/cua + cd cua/libs/typescript/cua-cli + + # Install the CLI + bun install + bun link + bun link cua-cli + ``` + + + + + + + + +### Authenticate with CUA + +Login to your CUA account: + +```bash +# Interactive browser login (recommended) +cua auth login + +# Or provide your API key directly +cua auth login --api-key sk-your-api-key-here +``` + +If you don't have a CUA account yet, sign up at [cua.ai/signin](https://cua.ai/signin). + + + + + +### Create Your First Sandbox + +Create a cloud sandbox where your AI agents will run: + +```bash +# Create a Linux sandbox (recommended for most use cases) +cua sb create --os linux --size small --region north-america + +# Or create a Windows sandbox +cua sb create --os windows --size small --region north-america + +``` + +Your sandbox will be created and you'll see output like: + +``` +Sandbox created and ready: my-sandbox-abc123 +Password: secure-password-here +Host: my-sandbox-abc123.sandbox.cua.ai +``` + + + + + +### Start Using Your Sandbox + +You can now interact with your sandbox in multiple ways: + + + +#### Option 1: Access VNC Desktop + +```bash +cua sb vnc my-sandbox-abc123 +``` + +This opens a remote desktop connection to your sandbox. + +#### Option 2: List and Manage Sandboxes + +```bash +# List all your sandboxes +cua sb list + +# Start/stop sandboxes as needed +cua sb stop my-sandbox-abc123 +cua sb start my-sandbox-abc123 + +# Delete sandboxes when done +cua sb delete my-sandbox-abc123 +``` + + + + + +### What's Next? + +- **Explore more commands**: Check out the [complete CLI reference](/libraries/cua-cli/commands) +- **Learn about programming**: Try the [Developer Quickstart](#developer-quickstart) to build custom automations +- **Join the community**: Get help in our [Discord community](https://discord.com/invite/mVnXXpdE85) + +--- + +For running models locally, see [Running Models Locally](/agent-sdk/supported-model-providers/local-models). */} diff --git a/docs/content/docs/index.mdx b/docs/content/docs/index.mdx index 9e4bf2ff..f475db7f 100644 --- a/docs/content/docs/index.mdx +++ b/docs/content/docs/index.mdx @@ -1,25 +1,58 @@ --- -title: Home -icon: House +title: Introduction --- -import { Monitor, Code, BookOpen } from 'lucide-react'; +import { Monitor, Code, BookOpen, Zap, Bot, Boxes, Rocket } from 'lucide-react'; -# Welcome! +
+ Cua is an open-source framework for building **Computer-Use Agents** - AI systems that see, + understand, and interact with desktop applications through vision and action, just like humans do. +
-Cua is a framework for automating Windows, Mac, and Linux apps powered by computer-using agents (CUAs). +## Why Cua? -Cua makes every stage of computer-using agent development simple: +Cua gives you everything you need to automate any desktop application without brittle selectors or APIs. -- **Development**: Use any LLM provider with liteLLM. The agent SDK makes multiple agent loop providers, trajectory tracing, caching, and budget management easy -- **Containerization**: Cua offers Docker containers pre-installed with everything needed for AI-powered RPA -- **Deployment**: Cua cloud gives you a production-ready cloud environment for your assistants +Some highlights include: + +- **Model flexibility** - Connect to 100+ LLM providers through liteLLM's standard interface. Use models from Anthropic, OpenAI, Google, and more - or run them locally with Ollama, Hugging Face, or MLX. +- **Composed agents** - Mix and match grounding models with planning models for optimal performance. Use specialized models like GTA, OpenCUA, or OmniParser for UI element detection paired with powerful reasoning models like Claude or GPT-4. +- **Cross-platform sandboxes** - Run agents safely in isolated environments. Choose from Docker containers, macOS VMs with Lume, Windows Sandbox, or deploy to Cua Cloud with production-ready infrastructure. +- **Computer SDK** - Control any application with a PyAutoGUI-like API. Click, type, scroll, take screenshots, manage windows, read/write files - everything you need for desktop automation. +- **Agent SDK** - Build autonomous agents with trajectory tracing, prompt caching, cost tracking, and budget controls. Test agents on industry-standard benchmarks like OSWorld-Verified with one line of code. +- **Human-in-the-loop** - Pause agent execution and await user input or approval before continuing. Use the `human/human` model string to let humans control the agent directly. +- **Production essentials** - Ship reliable agents with built-in PII anonymization, cost tracking, trajectory logging, and integration with observability platforms like Laminar and HUD. + +## What can you build? + +- RPA automation that works with any application - even legacy software without APIs. +- Form-filling agents that handle complex multi-step web workflows. +- Testing automation that adapts to UI changes without brittle selectors. +- Data extraction from desktop applications and document processing. +- Cross-application workflows that combine multiple tools and services. +- Research agents that browse, read, and synthesize information from the web. + +Explore real-world examples in our [blog posts](https://cua.ai/blog). + +## Get started + +Follow the [Quickstart guide](/docs/get-started/quickstart) for step-by-step setup with Python or TypeScript. + +If you're new to computer-use agents, check out our [tutorials](https://cua.ai/blog), [examples](https://github.com/trycua/cua/tree/main/examples), and [notebooks](https://github.com/trycua/cua/tree/main/notebooks) to start building with Cua today.
- } href="/quickstart-devs" title="Quickstart (Developers)"> - Build with Pythonβ€”full SDK and agent code examples. + } href="/get-started/quickstart" title="Quickstart"> + Get up and running in 3 steps with Python or TypeScript. - } href="/libraries/agent" title="API Reference"> - Explore the agent SDK and APIs + } href="/agent-sdk/agent-loops" title="Agent Loops"> + Learn how agents work and how to build your own. + + } href="/computer-sdk/computers" title="Computer SDK"> + Control desktop applications with the Computer SDK. + + } href="/example-usecases/form-filling" title="Example Use Cases"> + See Cua in action with real-world examples.
+ +We can't wait to see what you build with Cua ✨ diff --git a/docs/content/docs/libraries/cua-cli/commands.mdx b/docs/content/docs/libraries/cua-cli/commands.mdx new file mode 100644 index 00000000..b425b9a4 --- /dev/null +++ b/docs/content/docs/libraries/cua-cli/commands.mdx @@ -0,0 +1,360 @@ +--- +title: Commands +description: Complete reference for all CUA CLI commands +--- + +import { Tabs, Tab } from 'fumadocs-ui/components/tabs'; +import { Callout } from 'fumadocs-ui/components/callout'; + +## Overview + +The CUA CLI provides commands for authentication and sandbox management. + +### Command Styles + +The CLI supports **two command styles** for flexibility: + +**Flat style** (quick & concise): + +```bash +cua list +cua create --os linux --size small --region north-america +cua start my-sandbox +``` + +**Grouped style** (explicit & clear): + +```bash +cua sb list # or: cua sandbox list +cua sb create # or: cua sandbox create +cua sb start # or: cua sandbox start +``` + +Both styles work identically - use whichever you prefer! + +### Available Commands + +- **Authentication** - `cua auth login`, `cua auth env`, `cua auth logout` (also available as flat commands: `cua login`, `cua env`, `cua logout`) +- **Sandbox Management** - `cua list`, `cua create`, `cua start`, `cua stop`, `cua restart`, `cua delete`, `cua vnc` + +## Authentication Commands + +### `cua auth login` + +Authenticate with your CUA account using browser-based OAuth flow. + +```bash +# Interactive browser login +cua auth login + +# Direct API key login +cua auth login --api-key sk-your-api-key-here + +# Alternative flat style +cua login +cua login --api-key sk-your-api-key-here +``` + +**Options:** + +- `--api-key ` - Provide API key directly instead of browser flow + +**Example:** + +```bash +$ cua auth login +Opening browser for CLI auth... +API key saved +``` + +### `cua auth env` + +Create or update a `.env` file in the current directory with your CUA API key. + +```bash +cua auth env + +# Alternative flat style +cua env +``` + +**Example:** + +```bash +$ cua auth env +Wrote /path/to/your/project/.env +``` + +The generated `.env` file will contain: + +``` +CUA_API_KEY=sk-your-api-key-here +``` + +### `cua auth logout` + +Remove the stored API key from your system. + +```bash +cua auth logout + +# Alternative flat style +cua logout +``` + +**Example:** + +```bash +$ cua auth logout +Logged out +``` + +## Sandbox Commands + +### `cua list` + +List all your sandboxes with their current status. Passwords are hidden by default for security. + +```bash +# List sandboxes (passwords hidden) +cua list + +# Show passwords explicitly +cua list --show-passwords + +# Alternative aliases +cua ls +cua ps +``` + +**Example Output (default, passwords hidden):** + +``` +NAME STATUS HOST +my-dev-sandbox running my-dev-sandbox.sandbox.cua.ai +test-windows stopped test-windows.sandbox.cua.ai +``` + +**Example Output (with --show-passwords):** + +``` +NAME STATUS PASSWORD HOST +my-dev-sandbox running secure-pass-123 my-dev-sandbox.sandbox.cua.ai +test-windows stopped another-pass-456 test-windows.sandbox.cua.ai +``` + +### `cua create` + +Create a new sandbox. 
+ +```bash +cua create --os --size --region +``` + +**Required Options:** + +- `--os` - Operating system: `linux`, `windows`, `macos` +- `--size` - Sandbox size: `small`, `medium`, `large` +- `--region` - Region: `north-america`, `europe`, `asia-pacific`, `south-america` + +**Examples:** + +```bash +# Create a small Linux sandbox in North America +cua create --os linux --size small --region north-america + +# Create a medium Windows sandbox in Europe +cua create --os windows --size medium --region europe + +# Create a large macOS sandbox in Asia Pacific +cua create --os macos --size large --region asia-pacific +``` + +**Response Types:** + +**Immediate (Status 200):** + +```bash +Sandbox created and ready: my-new-sandbox-abc123 +Password: secure-password-here +Host: my-new-sandbox-abc123.sandbox.cua.ai +``` + +**Provisioning (Status 202):** + +```bash +Sandbox provisioning started: my-new-sandbox-abc123 +Job ID: job-xyz789 +Use 'cua list' to monitor provisioning progress +``` + +### `cua start` + +Start a stopped sandbox. + +```bash +cua start +``` + +**Example:** + +```bash +$ cua start my-dev-sandbox +Start accepted +``` + +### `cua stop` + +Stop a running sandbox. + +```bash +cua stop +``` + +**Example:** + +```bash +$ cua stop my-dev-sandbox +stopping +``` + +### `cua restart` + +Restart a sandbox. + +```bash +cua restart +``` + +**Example:** + +```bash +$ cua restart my-dev-sandbox +restarting +``` + +### `cua delete` + +Delete a sandbox permanently. + +```bash +cua delete +``` + +**Example:** + +```bash +$ cua delete old-test-sandbox +Sandbox deletion initiated: deleting +``` + + + This action is irreversible. All data on the sandbox will be permanently lost. + + +### `cua vnc` + +Open the VNC interface for a sandbox in your browser. + +```bash +cua vnc + +# Alternative alias +cua open +``` + +**Example:** + +```bash +$ cua vnc my-dev-sandbox +Opening NoVNC: https://my-dev-sandbox.sandbox.cua.ai/vnc.html?autoconnect=true&password=... +``` + +This command automatically opens your default browser to the VNC interface with the correct password pre-filled. + +## Global Options + +### Help + +Get help for any command: + +```bash +cua --help +cua auth login --help +cua create --help +cua list --help +``` + +## Error Handling + +The CLI provides clear error messages for common issues: + +### Authentication Errors + +```bash +$ cua list +Unauthorized. Try 'cua auth login' again. +``` + +### Sandbox Not Found + +```bash +$ cua start nonexistent-sandbox +Sandbox not found +``` + +### Invalid Configuration + +```bash +$ cua create --os invalid --configuration small --region north-america +Invalid request or unsupported configuration +``` + +## Tips and Best Practices + +### 1. Use Descriptive Sandbox Names + +```bash +# Good +cua create --os linux --size small --region north-america +# Then rename or use meaningful names in the dashboard + +# Better workflow +cua list # Check the generated name +# Use that name consistently +``` + +### 2. Environment Management + +```bash +# Set up your project with API key +cd my-project +cua auth env +# Now your project has CUA_API_KEY in .env +``` + +### 3. Quick Sandbox Access + +```bash +# Create aliases for frequently used sandboxes +alias dev-sandbox="cua vnc my-development-sandbox" +alias prod-sandbox="cua vnc my-production-sandbox" +``` + +### 4. 
Monitoring Provisioning + +```bash +# For sandboxes that need provisioning time +cua create --os windows --size large --region europe +# Sandbox provisioning started: my-sandbox-abc123 +# Job ID: job-xyz789 + +# Check status periodically +watch -n 5 cua list +``` + +## Next Steps + +- [Get started with the quickstart guide](/get-started/quickstart#cli-quickstart) +- [Learn about CUA computers](/computer-sdk/computers) +- [Explore agent automation](/agent-sdk/agent-loops) diff --git a/docs/content/docs/libraries/cua-cli/index.mdx b/docs/content/docs/libraries/cua-cli/index.mdx new file mode 100644 index 00000000..7a7ac914 --- /dev/null +++ b/docs/content/docs/libraries/cua-cli/index.mdx @@ -0,0 +1,58 @@ +--- +title: Cua CLI +description: Command-line interface for managing Cua cloud sandboxes and authentication +--- + +import { Tabs, Tab } from 'fumadocs-ui/components/tabs'; + +The Cua CLI is a command-line tool that provides an intuitive interface for managing your Cua cloud sandboxes and authentication. It offers a streamlined workflow for creating, managing, and connecting to cloud sandboxes. + +## Key Features + +- **Authentication Management**: Secure login with browser-based OAuth flow +- **Sandbox Lifecycle**: Create, start, stop, restart, and delete cloud sandboxes +- **Quick Access**: Direct links to VNC and playground interfaces +- **Cross-Platform**: Works on macOS, Linux, and Windows +- **Environment Integration**: Automatic `.env` file generation + +## Quick Example + +```bash +# Install the CLI (installs Bun + CUA CLI) +curl -LsSf https://cua.ai/cli/install.sh | sh + +# Login to your CUA account +cua auth login + +# Create a new Linux sandbox +cua sb create --os linux --size small --region north-america + +# List your sandboxes +cua sb list +``` + +## Use Cases + +### Development Workflow + +- Quickly spin up cloud sandboxes for testing +- Manage multiple sandboxes across different regions +- Integrate with CI/CD pipelines + +### Team Collaboration + +- Share sandbox configurations and access +- Standardize development environments +- Quick onboarding for new team members + +### Automation + +- Script sandbox provisioning and management +- Integrate with deployment workflows +- Automate environment setup + +## Next Steps + +- [Install the CLI](/libraries/cua-cli/installation) +- [Learn about available commands](/libraries/cua-cli/commands) +- [Get started with the quickstart guide](/get-started/quickstart#cli-quickstart) diff --git a/docs/content/docs/libraries/cua-cli/installation.mdx b/docs/content/docs/libraries/cua-cli/installation.mdx new file mode 100644 index 00000000..9e08a7f0 --- /dev/null +++ b/docs/content/docs/libraries/cua-cli/installation.mdx @@ -0,0 +1,130 @@ +--- +title: Installation +description: Install the CUA CLI on your system +--- + +import { Tabs, Tab } from 'fumadocs-ui/components/tabs'; +import { Callout } from 'fumadocs-ui/components/callout'; + +## Quick Install + +The fastest way to install the CUA CLI is using our installation scripts: + + + ```bash curl -LsSf https://cua.ai/cli/install.sh | sh ``` + + ```powershell powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" + ``` + + + +These scripts will automatically: + +1. Install [Bun](https://bun.sh) (a fast JavaScript runtime) +2. Install the CUA CLI via `bun add -g @trycua/cli` + + + The installation scripts will automatically detect your system and install the appropriate binary + to your PATH. 
+ + +## Alternative: Install with Bun + +You can also install the CLI directly using Bun: + +```bash +# Install Bun if you don't have it +curl -fsSL https://bun.sh/install | bash + +# Install CUA CLI +bun add -g @trycua/cli +``` + + + Using Bun provides faster installation and better performance compared to npm. If you don't have + Bun installed, the first command will install it for you. + + +## Verify Installation + +After installation, verify the CLI is working: + +```bash +cua --help +``` + +You should see the CLI help output with available commands. + +## First Time Setup + +After installation, you'll need to authenticate with your CUA account: + +```bash +# Login with browser-based OAuth flow +cua auth login + +# Or provide your API key directly +cua auth login --api-key sk-your-api-key-here +``` + +## Updating + +To update to the latest version: + + + + Re-run the installation script: ```bash # macOS/Linux curl -LsSf https://cua.ai/cli/install.sh | + sh # Windows powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" + ``` + + ```bash npm update -g @trycua/cli ``` + + +## Uninstalling + + + + Remove the binary from your PATH: ```bash # macOS/Linux rm $(which cua) # Windows # Remove from + your PATH or delete the executable ``` + + ```bash npm uninstall -g @trycua/cli ``` + + +## Troubleshooting + +### Command Not Found + +If you get a "command not found" error after installation: + +1. **Check your PATH**: Make sure the installation directory is in your PATH +2. **Restart your terminal**: Close and reopen your terminal/command prompt +3. **Manual PATH setup**: Add the installation directory to your PATH manually + +### Permission Issues + +If you encounter permission issues during installation: + + + + Try running with sudo (not recommended for the curl method): ```bash # If using npm sudo npm + install -g @trycua/cli ``` + + + Run PowerShell as Administrator: ```powershell # Right-click PowerShell and "Run as + Administrator" powershell -ExecutionPolicy ByPass -c "irm https://cua.ai/cli/install.ps1 | iex" + ``` + + + +### Network Issues + +If the installation script fails due to network issues: + +1. **Check your internet connection** +2. **Try the npm installation method instead** +3. **Check if your firewall is blocking the download** + +## Next Steps + +- [Learn about CLI commands](/libraries/cua-cli/commands) +- [Follow the quickstart guide](/get-started/quickstart#cli-quickstart) diff --git a/docs/content/docs/libraries/cua-cli/meta.json b/docs/content/docs/libraries/cua-cli/meta.json new file mode 100644 index 00000000..f1c957cc --- /dev/null +++ b/docs/content/docs/libraries/cua-cli/meta.json @@ -0,0 +1,5 @@ +{ + "title": "CLI", + "description": "Command-line interface for CUA", + "pages": ["index", "installation", "commands"] +} diff --git a/docs/content/docs/libraries/lume/installation.mdx b/docs/content/docs/libraries/lume/installation.mdx index 7b990665..aa6da45a 100644 --- a/docs/content/docs/libraries/lume/installation.mdx +++ b/docs/content/docs/libraries/lume/installation.mdx @@ -5,7 +5,7 @@ description: Installation instructions for the current version of the Lume CLI. 
## Quickstart -Install and run a prebuilt macOS VM in two commands: +Install and run a prebuilt macOS sandbox in two commands: ```bash # Install Lume diff --git a/docs/content/docs/libraries/mcp-server/client-integrations.mdx b/docs/content/docs/libraries/mcp-server/client-integrations.mdx index 4ad0c6a6..43d76ab5 100644 --- a/docs/content/docs/libraries/mcp-server/client-integrations.mdx +++ b/docs/content/docs/libraries/mcp-server/client-integrations.mdx @@ -6,6 +6,72 @@ title: Client Integrations To use with Claude Desktop, add an entry to your Claude Desktop configuration (`claude_desktop_config.json`, typically found in `~/.config/claude-desktop/`): +### Package Installation Method + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-20250514", + "ANTHROPIC_API_KEY": "your-anthropic-api-key-here", + "CUA_MAX_IMAGES": "3", + "CUA_USE_HOST_COMPUTER_SERVER": "false" + } + } + } +} +``` + +### Development Method + +If you're working with the CUA source code: + +**Standard VM Mode:** + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/usr/bin/env", + "args": [ + "bash", + "-lc", + "export CUA_MODEL_NAME='anthropic/claude-sonnet-4-20250514'; export ANTHROPIC_API_KEY='your-anthropic-api-key-here'; /path/to/cua/libs/python/mcp-server/scripts/start_mcp_server.sh" + ] + } + } +} +``` + +**Host Computer Control Mode:** + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/usr/bin/env", + "args": [ + "bash", + "-lc", + "export CUA_MODEL_NAME='anthropic/claude-sonnet-4-20250514'; export ANTHROPIC_API_KEY='your-anthropic-api-key-here'; export CUA_USE_HOST_COMPUTER_SERVER='true'; export CUA_MAX_IMAGES='1'; /path/to/cua/libs/python/mcp-server/scripts/start_mcp_server.sh" + ] + } + } +} +``` + +**Note**: Replace `/path/to/cua` with the absolute path to your CUA repository directory. + +**⚠️ Host Computer Control Setup**: When using `CUA_USE_HOST_COMPUTER_SERVER='true'`, you must also: + +1. Install computer server dependencies: `python3 -m pip install uvicorn fastapi` +2. Install the computer server: `python3 -m pip install -e libs/python/computer-server --break-system-packages` +3. Start the computer server: `python -m computer_server --log-level debug` +4. The AI will have direct access to your desktop - use with caution! + For more information on MCP with Claude Desktop, see the [official MCP User Guide](https://modelcontextprotocol.io/quickstart/user). ## Cursor Integration @@ -15,6 +81,43 @@ To use with Cursor, add an MCP configuration file in one of these locations: - **Project-specific**: Create `.cursor/mcp.json` in your project directory - **Global**: Create `~/.cursor/mcp.json` in your home directory +Example configuration for Cursor: + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-20250514", + "ANTHROPIC_API_KEY": "your-anthropic-api-key-here" + } + } + } +} +``` + After configuration, you can simply tell Cursor's Agent to perform computer tasks by explicitly mentioning the CUA agent, such as "Use the computer control tools to open Safari." For more information on MCP with Cursor, see the [official Cursor MCP documentation](https://docs.cursor.com/context/model-context-protocol). + +## Other MCP Clients + +The MCP server is compatible with any MCP-compliant client. 
The server exposes the following tools: + +- `run_cua_task` - Execute single computer tasks +- `run_multi_cua_tasks` - Execute multiple tasks (sequential or concurrent) +- `screenshot_cua` - Capture screenshots +- `get_session_stats` - Monitor session statistics +- `cleanup_session` - Manage session lifecycle + +### Configuration Options + +All MCP clients can configure the server using environment variables: + +- `CUA_MODEL_NAME` - Model to use for task execution +- `CUA_MAX_IMAGES` - Maximum images to keep in context +- `CUA_USE_HOST_COMPUTER_SERVER` - Use host system instead of VM + +See the [Configuration](/docs/libraries/mcp-server/configuration) page for detailed configuration options. diff --git a/docs/content/docs/libraries/mcp-server/configuration.mdx b/docs/content/docs/libraries/mcp-server/configuration.mdx index 998ccc29..30c3074f 100644 --- a/docs/content/docs/libraries/mcp-server/configuration.mdx +++ b/docs/content/docs/libraries/mcp-server/configuration.mdx @@ -4,7 +4,70 @@ title: Configuration The server is configured using environment variables (can be set in the Claude Desktop config): -| Variable | Description | Default | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| `CUA_MODEL_NAME` | Model string (e.g., "anthropic/claude-3-5-sonnet-20241022", "openai/computer-use-preview", "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", "omniparser+litellm/gpt-4o", "omniparser+ollama_chat/gemma3") | anthropic/claude-3-5-sonnet-20241022 | -| `CUA_MAX_IMAGES` | Maximum number of images to keep in context | 3 | +| Variable | Description | Default | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- | +| `CUA_MODEL_NAME` | Model string (e.g., "anthropic/claude-sonnet-4-20250514", "openai/computer-use-preview", "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", "omniparser+litellm/gpt-4o", "omniparser+ollama_chat/gemma3") | anthropic/claude-sonnet-4-20250514 | +| `ANTHROPIC_API_KEY` | Your Anthropic API key (required for Anthropic models) | None | +| `CUA_MAX_IMAGES` | Maximum number of images to keep in context | 3 | +| `CUA_USE_HOST_COMPUTER_SERVER` | Target your local desktop instead of a VM. Set to "true" to use your host system. **Warning:** AI models may perform risky actions. 
| false | + +## Model Configuration + +The `CUA_MODEL_NAME` environment variable supports various model providers through LiteLLM integration: + +### Supported Providers + +- **Anthropic**: `anthropic/claude-sonnet-4-20250514`, +- **OpenAI**: `openai/computer-use-preview`, `openai/gpt-4o` +- **Local Models**: `huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B` +- **Omni + LiteLLM**: `omniparser+litellm/gpt-4o`, `omniparser+litellm/claude-3-haiku` +- **Ollama**: `omniparser+ollama_chat/gemma3` + +### Example Configurations + +**Claude Desktop Configuration:** + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-20250514", + "ANTHROPIC_API_KEY": "your-anthropic-api-key-here", + "CUA_MAX_IMAGES": "5", + "CUA_USE_HOST_COMPUTER_SERVER": "false" + } + } + } +} +``` + +**Local Model Configuration:** + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", + "CUA_MAX_IMAGES": "3" + } + } + } +} +``` + +## Session Management Configuration + +The MCP server automatically manages sessions with the following defaults: + +- **Max Concurrent Sessions**: 10 +- **Session Timeout**: 10 minutes of inactivity +- **Computer Pool Size**: 5 instances +- **Automatic Cleanup**: Enabled + +These settings are optimized for typical usage and don't require configuration for most users. diff --git a/docs/content/docs/libraries/mcp-server/index.mdx b/docs/content/docs/libraries/mcp-server/index.mdx index e79d6b1e..7f2c7684 100644 --- a/docs/content/docs/libraries/mcp-server/index.mdx +++ b/docs/content/docs/libraries/mcp-server/index.mdx @@ -7,3 +7,21 @@ github: --- **cua-mcp-server** is a MCP server for the Computer-Use Agent (CUA), allowing you to run CUA through Claude Desktop or other MCP clients. + +## Features + +- **Multi-Client Support**: Concurrent sessions with automatic resource management +- **Progress Reporting**: Real-time progress updates during task execution +- **Error Handling**: Robust error recovery with screenshot capture +- **Concurrent Execution**: Run multiple tasks in parallel for improved performance +- **Session Management**: Automatic cleanup and resource pooling +- **LiteLLM Integration**: Support for multiple model providers +- **VM Safety**: Default VM execution with optional host system control + +## Quick Start + +1. **Install**: `pip install cua-mcp-server` +2. **Configure**: Add to your MCP client configuration +3. **Use**: Ask Claude to perform computer tasks + +See the [Installation](/docs/libraries/mcp-server/installation) guide for detailed setup instructions. diff --git a/docs/content/docs/libraries/mcp-server/installation.mdx b/docs/content/docs/libraries/mcp-server/installation.mdx index 9c0d281f..b9c14f09 100644 --- a/docs/content/docs/libraries/mcp-server/installation.mdx +++ b/docs/content/docs/libraries/mcp-server/installation.mdx @@ -38,19 +38,103 @@ You can then use the script in your MCP configuration like this: "command": "/bin/bash", "args": ["~/.cua/start_mcp_server.sh"], "env": { - "CUA_MODEL_NAME": "anthropic/claude-3-5-sonnet-20241022" + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-20250514", + "ANTHROPIC_API_KEY": "your-anthropic-api-key-here" } } } } ``` +**Important**: You must include your Anthropic API key for the MCP server to work properly. 
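+
+If you'd rather not hard-code the key in the JSON, the usage notes also mention setting it as an environment variable in your shell profile; a minimal sketch with a placeholder value:
+
+```bash
+# Placeholder - replace with your actual Anthropic API key
+export ANTHROPIC_API_KEY="your-anthropic-api-key-here"
+```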
+ +## Development Setup + +If you're working with the CUA source code directly (like in the CUA repository), you can use the development script instead: + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/usr/bin/env", + "args": [ + "bash", + "-lc", + "export CUA_MODEL_NAME='anthropic/claude-sonnet-4-20250514'; export ANTHROPIC_API_KEY='your-anthropic-api-key-here'; /path/to/cua/libs/python/mcp-server/scripts/start_mcp_server.sh" + ] + } + } +} +``` + +**For host computer control** (development setup): + +1. **Install Computer Server Dependencies**: + + ```bash + python3 -m pip install uvicorn fastapi + python3 -m pip install -e libs/python/computer-server --break-system-packages + ``` + +2. **Start the Computer Server**: + + ```bash + cd /path/to/cua + python -m computer_server --log-level debug + ``` + + This will start the computer server on `http://localhost:8000` that controls your actual desktop. + +3. **Configure Claude Desktop**: + ```json + { + "mcpServers": { + "cua-agent": { + "command": "/usr/bin/env", + "args": [ + "bash", + "-lc", + "export CUA_MODEL_NAME='anthropic/claude-sonnet-4-20250514'; export ANTHROPIC_API_KEY='your-anthropic-api-key-here'; export CUA_USE_HOST_COMPUTER_SERVER='true'; export CUA_MAX_IMAGES='1'; /path/to/cua/libs/python/mcp-server/scripts/start_mcp_server.sh" + ] + } + } + } + ``` + +**Note**: Replace `/path/to/cua` with the absolute path to your CUA repository directory. + +**⚠️ Important**: When using host computer control (`CUA_USE_HOST_COMPUTER_SERVER='true'`), the AI will have direct access to your desktop and can perform actions like opening applications, clicking, typing, and taking screenshots. Make sure you're comfortable with this level of access. + ### Troubleshooting -If you get a `/bin/bash: ~/cua/libs/python/mcp-server/scripts/start_mcp_server.sh: No such file or directory` error, try changing the path to the script to be absolute instead of relative. +**Common Issues:** -To see the logs: +1. **"Claude's response was interrupted"** - This usually means: + - Missing API key: Add `ANTHROPIC_API_KEY` to your environment variables + - Invalid model name: Use a valid model like `anthropic/claude-sonnet-4-20250514` + - Check logs for specific error messages -``` +2. **"Missing Anthropic API Key"** - Add your API key to the configuration: + + ```json + "env": { + "ANTHROPIC_API_KEY": "your-api-key-here" + } + ``` + +3. **"model not found"** - Use a valid model name: + - βœ… `anthropic/claude-sonnet-4-20250514` + +4. **Script not found** - If you get a `/bin/bash: ~/cua/libs/python/mcp-server/scripts/start_mcp_server.sh: No such file or directory` error, try changing the path to the script to be absolute instead of relative. + +5. 
**Host Computer Control Issues** - If using `CUA_USE_HOST_COMPUTER_SERVER='true'`: + - **Computer Server not running**: Make sure you've started the computer server with `python -m computer_server --log-level debug` + - **Port 8000 in use**: Check if another process is using port 8000 with `lsof -i :8000` + - **Missing dependencies**: Install `uvicorn` and `fastapi` with `python3 -m pip install uvicorn fastapi` + - **Image size errors**: Use `CUA_MAX_IMAGES='1'` to reduce image context size + +**Viewing Logs:** + +```bash tail -n 20 -f ~/Library/Logs/Claude/mcp*.log ``` diff --git a/docs/content/docs/libraries/mcp-server/llm-integrations.mdx b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx index 6dedd52d..656def70 100644 --- a/docs/content/docs/libraries/mcp-server/llm-integrations.mdx +++ b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx @@ -12,7 +12,7 @@ This MCP server features comprehensive liteLLM integration, allowing you to use ### Model String Examples: -- **Anthropic**: `"anthropic/claude-3-5-sonnet-20241022"` +- **Anthropic**: `"anthropic/claude-sonnet-4-5-20250929"` - **OpenAI**: `"openai/computer-use-preview"` - **UI-TARS**: `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"` - **Omni + Any LiteLLM**: `"omniparser+litellm/gpt-4o"`, `"omniparser+litellm/claude-3-haiku"`, `"omniparser+ollama_chat/gemma3"` diff --git a/docs/content/docs/libraries/mcp-server/tools.mdx b/docs/content/docs/libraries/mcp-server/tools.mdx index fd09b366..0b4616ef 100644 --- a/docs/content/docs/libraries/mcp-server/tools.mdx +++ b/docs/content/docs/libraries/mcp-server/tools.mdx @@ -6,5 +6,61 @@ title: Tools The MCP server exposes the following tools to Claude: -1. `run_cua_task` - Run a single Computer-Use Agent task with the given instruction -2. `run_multi_cua_tasks` - Run multiple tasks in sequence +### Core Task Execution Tools + +1. **`run_cua_task`** - Run a single Computer-Use Agent task with the given instruction + - `task` (string): The task description for the agent to execute + - `session_id` (string, optional): Session ID for multi-client support. If not provided, a new session will be created + - Returns: Tuple of (combined text output, final screenshot) + +2. **`run_multi_cua_tasks`** - Run multiple tasks in sequence or concurrently + - `tasks` (list of strings): List of task descriptions to execute + - `session_id` (string, optional): Session ID for multi-client support. If not provided, a new session will be created + - `concurrent` (boolean, optional): If true, run tasks concurrently. If false, run sequentially (default) + - Returns: List of tuples (combined text output, screenshot) for each task + +### Utility Tools + +3. **`screenshot_cua`** - Take a screenshot of the current screen + - `session_id` (string, optional): Session ID for multi-client support. If not provided, a new session will be created + - Returns: Screenshot image + +4. **`get_session_stats`** - Get statistics about active sessions and resource usage + - Returns: Dictionary with session statistics including total sessions, active tasks, and session details + +5. 
**`cleanup_session`** - Cleanup a specific session and release its resources + - `session_id` (string): The session ID to cleanup + - Returns: Confirmation message + +## Session Management + +The MCP server supports multi-client sessions with automatic resource management: + +- **Session Isolation**: Each client can have its own session with isolated computer instances +- **Resource Pooling**: Computer instances are pooled for efficient resource usage +- **Automatic Cleanup**: Idle sessions are automatically cleaned up after 10 minutes +- **Concurrent Tasks**: Multiple tasks can run concurrently within the same session +- **Progress Reporting**: Real-time progress updates during task execution + +## Usage Examples + +### Basic Task Execution + +``` +"Open Chrome and navigate to github.com" +"Create a folder called 'Projects' on my desktop" +``` + +### Multi-Task Execution + +``` +"Run these tasks: 1) Open Finder, 2) Navigate to Documents, 3) Create a new folder called 'Work'" +``` + +### Session Management + +``` +"Take a screenshot of the current screen" +"Show me the session statistics" +"Cleanup session abc123" +``` diff --git a/docs/content/docs/libraries/mcp-server/usage.mdx b/docs/content/docs/libraries/mcp-server/usage.mdx index 2cefa2be..d65fc644 100644 --- a/docs/content/docs/libraries/mcp-server/usage.mdx +++ b/docs/content/docs/libraries/mcp-server/usage.mdx @@ -2,7 +2,7 @@ title: Usage --- -## Usage +## Basic Usage Once configured, you can simply ask Claude to perform computer tasks: @@ -13,9 +13,157 @@ Once configured, you can simply ask Claude to perform computer tasks: Claude will automatically use your CUA agent to perform these tasks. -### First-time Usage Notes +## Advanced Features + +### Progress Reporting + +The MCP server provides real-time progress updates during task execution: + +- Task progress is reported as percentages (0-100%) +- Multi-task operations show progress for each individual task +- Progress updates are streamed to the MCP client for real-time feedback + +### Error Handling + +Robust error handling ensures reliable operation: + +- Failed tasks return error messages with screenshots when possible +- Session state is preserved even when individual tasks fail +- Automatic cleanup prevents resource leaks +- Detailed error logging for troubleshooting + +### Concurrent Task Execution + +For improved performance, multiple tasks can run concurrently: + +- Set `concurrent=true` in `run_multi_cua_tasks` for parallel execution +- Each task runs in its own context with isolated state +- Progress tracking works for both sequential and concurrent modes +- Resource pooling ensures efficient computer instance usage + +### Session Management + +Multi-client support with automatic resource management: + +- Each client gets isolated sessions with separate computer instances +- Sessions automatically clean up after 10 minutes of inactivity +- Resource pooling prevents resource exhaustion +- Session statistics available for monitoring + +## Target Computer Options + +By default, the MCP server runs CUA in a virtual machine for safety. However, you can also configure it to run on your local system. + +### Default: Using a VM (Recommended) + +The MCP server will automatically start and connect to a VM based on your platform. This is the safest option as AI actions are isolated from your host system. + +No additional configuration is needed - this is the default behavior. 
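+
+For reference, a minimal Claude Desktop entry that keeps this default might look like the sketch below - the model name and API key are placeholders, and since `false` is the default value, the `CUA_USE_HOST_COMPUTER_SERVER` line can be omitted entirely:
+
+```json
+{
+  "mcpServers": {
+    "cua-agent": {
+      "command": "/bin/bash",
+      "args": ["~/.cua/start_mcp_server.sh"],
+      "env": {
+        "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-5-20250929",
+        "ANTHROPIC_API_KEY": "your-anthropic-api-key-here",
+        "CUA_USE_HOST_COMPUTER_SERVER": "false"
+      }
+    }
+  }
+}
+```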
+ +### Option: Targeting Your Local Desktop + + + **Warning:** When targeting your local system, AI models have direct access to your desktop and + may perform risky actions. Use with caution. + + +To have the MCP server control your local desktop instead of a VM: + +1. **Start the Computer Server on your host:** + +```bash +pip install cua-computer-server +python -m computer_server +``` + +2. **Configure the MCP server to use your host system:** + +Add the `CUA_USE_HOST_COMPUTER_SERVER` environment variable to your MCP client configuration: + + + + Update your Claude Desktop config (see [Installation](/docs/libraries/mcp-server/installation)) to include the environment variable: + + ```json + { + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-5-20250929", + "CUA_USE_HOST_COMPUTER_SERVER": "true" + } + } + } + } + ``` + + + + Set the environment variable in your MCP client configuration: + + ```bash + export CUA_USE_HOST_COMPUTER_SERVER=true + ``` + + Then start your MCP client as usual. + + + + +3. **Restart your MCP client** (e.g., Claude Desktop) to apply the changes. + +Now Claude will control your local desktop directly when you ask it to perform computer tasks. + +## Usage Examples + +### Single Task Execution + +``` +"Open Safari and navigate to apple.com" +"Create a new folder on the desktop called 'My Projects'" +"Take a screenshot of the current screen" +``` + +### Multi-Task Execution (Sequential) + +``` +"Run these tasks in order: 1) Open Finder, 2) Navigate to Documents folder, 3) Create a new folder called 'Work'" +``` + +### Multi-Task Execution (Concurrent) + +``` +"Run these tasks simultaneously: 1) Open Chrome, 2) Open Safari, 3) Open Finder" +``` + +### Session Management + +``` +"Show me the current session statistics" +"Take a screenshot using session abc123" +"Cleanup session xyz789" +``` + +### Error Recovery + +``` +"Try to open a non-existent application and show me the error" +"Find all files with .tmp extension and delete them safely" +``` + +## First-time Usage Notes **API Keys**: Ensure you have valid API keys: -- Add your Anthropic API key, or other model provider API key in the Claude Desktop config (as shown above) +- Add your Anthropic API key in the Claude Desktop config (as shown above) - Or set it as an environment variable in your shell profile +- **Required**: The MCP server needs an API key to authenticate with the model provider + +**Model Selection**: Choose the appropriate model for your needs: + +- **Claude Sonnet 4**: Latest model with best performance (`anthropic/claude-sonnet-4-20250514`) +- **Computer-Use Preview**: Specialized for computer tasks (`openai/computer-use-preview`) +- **Local Models**: For privacy-sensitive environments +- **Ollama**: For offline usage diff --git a/docs/content/docs/meta.json b/docs/content/docs/meta.json index c3517f0a..199556f1 100644 --- a/docs/content/docs/meta.json +++ b/docs/content/docs/meta.json @@ -4,11 +4,10 @@ "root": true, "defaultOpen": true, "pages": [ - "index", - "quickstart-devs", - "quickstart-cli", - "telemetry", - "example-usecases", + "---[Rocket]Get Started---", + "...get-started", + "---[ChefHat]Cookbook---", + "...example-usecases", "---[BookCopy]Computer Playbook---", "...computer-sdk", "---[BookCopy]Agent Playbook---", diff --git a/docs/content/docs/quickstart-cli.mdx b/docs/content/docs/quickstart-cli.mdx deleted file mode 100644 index 6eb43649..00000000 --- 
a/docs/content/docs/quickstart-cli.mdx +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: Quickstart (CLI) -description: Get started with the Cua Agent CLI in 4 steps -icon: Rocket ---- - -import { Step, Steps } from 'fumadocs-ui/components/steps'; -import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; -import { Accordion, Accordions } from 'fumadocs-ui/components/accordion'; - -Get up and running with the Cua Agent CLI in 4 simple steps. - - - - -## Introduction - -Cua combines Computer (interface) + Agent (AI) for automating desktop apps. The Agent CLI provides a clean terminal interface to control your remote computer using natural language commands. - - - - - -## Set Up Your Computer Environment - -Choose how you want to run your Cua computer. **Cloud Sandbox is recommended** for the easiest setup: - - - - - **Easiest & safest way to get started - works on any host OS** - - 1. Go to [cua.ai/signin](https://cua.ai/signin) - 2. Navigate to **Dashboard > Containers > Create Instance** - 3. Create a **Medium, Ubuntu 22** container - 4. Note your container name and API key - - Your cloud container will be automatically configured and ready to use. - - - - - **Run Linux desktop locally on macOS, Windows, or Linux hosts** - - 1. Install Docker Desktop or Docker Engine - - 2. Pull the CUA XFCE container (lightweight desktop) - - ```bash - docker pull --platform=linux/amd64 trycua/cua-xfce:latest - ``` - - Or use KASM for a full-featured desktop: - - ```bash - docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest - ``` - - - - - **Windows hosts only - requires Windows 10 Pro/Enterprise or Windows 11** - - 1. Enable Windows Sandbox - 2. Install pywinsandbox dependency - - ```bash - pip install -U git+git://github.com/karkason/pywinsandbox.git - ``` - - 3. Windows Sandbox will be automatically configured when you run the CLI - - - - - **macOS hosts only - requires Lume CLI** - - 1. Install lume cli - - ```bash - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" - ``` - - 2. 
Start a local Cua macOS VM - - ```bash - lume run macos-sequoia-cua:latest - ``` - - - - - - - - -## Install Cua - - - - - -### Install uv - - - - -```bash -# Use curl to download the script and execute it with sh: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# If your system doesn't have curl, you can use wget: -# wget -qO- https://astral.sh/uv/install.sh | sh -``` - - - - -```powershell -# Use irm to download the script and execute it with iex: -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" -``` - - - - -### Install Python 3.12 - -```bash -uv python install 3.12 -# uv will install Cua dependencies automatically when you use --with "cua-agent[cli]" -``` - - - - - -### Install conda - - - - -```bash -mkdir -p ~/miniconda3 -curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh -bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 -rm ~/miniconda3/miniconda.sh -source ~/miniconda3/bin/activate -``` - - - - -```bash -mkdir -p ~/miniconda3 -wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh -bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 -rm ~/miniconda3/miniconda.sh -source ~/miniconda3/bin/activate -``` - - - - -```powershell -wget "https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe" -outfile ".\miniconda.exe" -Start-Process -FilePath ".\miniconda.exe" -ArgumentList "/S" -Wait -del .\miniconda.exe -``` - - - - -### Create and activate Python 3.12 environment - -```bash -conda create -n cua python=3.12 -conda activate cua -``` - -### Install Cua - -```bash -pip install "cua-agent[cli]" cua-computer -``` - - - - - -### Install Cua - -```bash -pip install "cua-agent[cli]" cua-computer -``` - - - - - - - - - -## Run Cua CLI - -Choose your preferred AI model: - -### OpenAI Computer Use Preview - - - - -```bash -uv run --with "cua-agent[cli]" -m agent.cli openai/computer-use-preview -``` - - - - -```bash -python -m agent.cli openai/computer-use-preview -``` - - - - -### Anthropic Claude - - - - -```bash -uv run --with "cua-agent[cli]" -m agent.cli anthropic/claude-sonnet-4-5-20250929 -uv run --with "cua-agent[cli]" -m agent.cli anthropic/claude-opus-4-20250514 -uv run --with "cua-agent[cli]" -m agent.cli anthropic/claude-opus-4-1-20250805 -uv run --with "cua-agent[cli]" -m agent.cli anthropic/claude-sonnet-4-20250514 -uv run --with "cua-agent[cli]" -m agent.cli anthropic/claude-3-5-sonnet-20241022 -``` - - - - -```bash -python -m agent.cli anthropic/claude-sonnet-4-5-20250929 -python -m agent.cli anthropic/claude-opus-4-1-20250805 -python -m agent.cli anthropic/claude-opus-4-20250514 -python -m agent.cli anthropic/claude-sonnet-4-20250514 -python -m agent.cli anthropic/claude-3-5-sonnet-20241022 -``` - - - - -### Omniparser + LLMs - - - - -```bash -uv run --with "cua-agent[cli]" -m agent.cli omniparser+anthropic/claude-3-5-sonnet-20241022 -uv run --with "cua-agent[cli]" -m agent.cli omniparser+openai/gpt-4o -uv run --with "cua-agent[cli]" -m agent.cli omniparser+vertex_ai/gemini-pro -``` - - - - -```bash -python -m agent.cli omniparser+anthropic/claude-3-5-sonnet-20241022 -python -m agent.cli omniparser+openai/gpt-4o -python -m agent.cli omniparser+vertex_ai/gemini-pro -``` - - - - -### Local Models - - - - -```bash -# Hugging Face models (local) -uv run --with "cua-agent[cli]" -m agent.cli huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B - -# MLX models (Apple Silicon) -uv run --with "cua-agent[cli]" -m agent.cli 
mlx/mlx-community/UI-TARS-1.5-7B-6bit - -# Ollama models -uv run --with "cua-agent[cli]" -m agent.cli omniparser+ollama_chat/llama3.2:latest -``` - - - - -```bash -# Hugging Face models (local) -python -m agent.cli huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B - -# MLX models (Apple Silicon) -python -m agent.cli mlx/mlx-community/UI-TARS-1.5-7B-6bit - -# Ollama models -python -m agent.cli omniparser+ollama_chat/llama3.2:latest -``` - - - - -### Interactive Setup - -If you haven't set up environment variables, the CLI will guide you through the setup: - -1. **Sandbox Name**: Enter your Cua sandbox name (or get one at [cua.ai](https://cua.ai/)) -2. **CUA API Key**: Enter your Cua API key -3. **Provider API Key**: Enter your AI provider API key (OpenAI, Anthropic, etc.) - -### Start Chatting - -Once connected, you'll see: - -``` -πŸ’» Connected to your-container-name (model, agent_loop) -Type 'exit' to quit. - -> -``` - -You can ask your agent to perform actions like: - -- "Take a screenshot and tell me what's on the screen" -- "Open Firefox and go to github.com" -- "Type 'Hello world' into the terminal" -- "Close the current window" -- "Click on the search button" - - - - ---- - -For running models locally, see [Running Models Locally](/agent-sdk/supported-model-providers/local-models). diff --git a/docs/content/docs/quickstart-devs.mdx b/docs/content/docs/quickstart-devs.mdx deleted file mode 100644 index e4b13bb0..00000000 --- a/docs/content/docs/quickstart-devs.mdx +++ /dev/null @@ -1,313 +0,0 @@ ---- -title: Quickstart -description: Get started with Cua in three steps -icon: Rocket ---- - -import { Step, Steps } from 'fumadocs-ui/components/steps'; -import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; - -This quickstart guides you through setting up your [computer environment](#set-up-your-computer-environment), programmatic control with a [Cua computer](#using-computer), and task automation with a [Cua agent](#using-agent): - - - - - -## Set Up Your Computer Environment - -Choose how you want to run your Cua computer. This will be the environment where your automated tasks will execute. - -You can run your Cua computer in the cloud (recommended for easiest setup), locally on macOS with Lume, locally on Windows with a Windows Sandbox, or in a Docker container on any platform. Choose the option that matches your system and needs. - - - - - Cua Cloud Sandbox provides virtual machines that run Ubuntu. - - 1. Go to [cua.ai/signin](https://cua.ai/signin) - 2. Navigate to **Dashboard > Containers > Create Instance** - 3. Create a **Medium, Ubuntu 22** sandbox - 4. Note your sandbox name and API key - - Your Cloud Sandbox will be automatically configured and ready to use. - - - - - Lume containers are macOS virtual machines that run on a macOS host machine. - - 1. Install the Lume CLI: - - ```bash - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" - ``` - - 2. Start a local Cua sandbox: - - ```bash - lume run macos-sequoia-cua:latest - ``` - - - - -Windows Sandbox provides Windows virtual environments that run on a Windows host machine. - - 1. Enable [Windows Sandbox](https://learn.microsoft.com/en-us/windows/security/application-security/application-isolation/windows-sandbox/windows-sandbox-install) (requires Windows 10 Pro/Enterprise or Windows 11) - 2. Install the `pywinsandbox` dependency: - - ```bash - pip install -U git+git://github.com/karkason/pywinsandbox.git - ``` - - 3. 
Windows Sandbox will be automatically configured when you run the CLI - - - - -Docker provides a way to run Ubuntu containers on any host machine. - - 1. Install Docker Desktop or Docker Engine: - - 2. Pull the CUA Ubuntu sandbox: - - ```bash - docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest - ``` - - - - - - - - -## Using Computer - -Connect to your Cua computer and perform basic interactions, such as taking screenshots or simulating user input. - - - - Install the Cua computer Python SDK: - ```bash - pip install cua-computer - ``` - - Then, connect to your desired computer environment: - - - - ```python - from computer import Computer - - computer = Computer( - os_type="linux", - provider_type="cloud", - name="your-sandbox-name", - api_key="your-api-key" - ) - await computer.run() # Connect to the sandbox - ``` - - - ```python - from computer import Computer - - computer = Computer( - os_type="macos", - provider_type="lume", - name="macos-sequoia-cua:latest" - ) - await computer.run() # Launch & connect to the container - ``` - - - ```python - from computer import Computer - - computer = Computer( - os_type="windows", - provider_type="windows_sandbox" - ) - await computer.run() # Launch & connect to the container - ``` - - - ```python - from computer import Computer - - computer = Computer( - os_type="linux", - provider_type="docker", - name="trycua/cua-ubuntu:latest" - ) - await computer.run() # Launch & connect to the container - ``` - - - Install and run `cua-computer-server`: - ```bash - pip install cua-computer-server - python -m computer_server - ``` - - Then, use the `Computer` object to connect: - ```python - from computer import Computer - - computer = Computer(use_host_computer_server=True) - await computer.run() # Connect to the host desktop - ``` - - - - Once connected, you can perform interactions: - ```python - try: - # Take a screenshot of the computer's current display - screenshot = await computer.interface.screenshot() - # Simulate a left-click at coordinates (100, 100) - await computer.interface.left_click(100, 100) - # Type "Hello!" 
into the active application - await computer.interface.type("Hello!") - finally: - await computer.close() - ``` - - - - Install the Cua computer TypeScript SDK: - ```bash - npm install @trycua/computer - ``` - - Then, connect to your desired computer environment: - - - - ```typescript - import { Computer, OSType } from '@trycua/computer'; - - const computer = new Computer({ - osType: OSType.LINUX, - name: "your-sandbox-name", - apiKey: "your-api-key" - }); - await computer.run(); // Connect to the sandbox - ``` - - - ```typescript - import { Computer, OSType, ProviderType } from '@trycua/computer'; - - const computer = new Computer({ - osType: OSType.MACOS, - providerType: ProviderType.LUME, - name: "macos-sequoia-cua:latest" - }); - await computer.run(); // Launch & connect to the container - ``` - - - ```typescript - import { Computer, OSType, ProviderType } from '@trycua/computer'; - - const computer = new Computer({ - osType: OSType.WINDOWS, - providerType: ProviderType.WINDOWS_SANDBOX - }); - await computer.run(); // Launch & connect to the container - ``` - - - ```typescript - import { Computer, OSType, ProviderType } from '@trycua/computer'; - - const computer = new Computer({ - osType: OSType.LINUX, - providerType: ProviderType.DOCKER, - name: "trycua/cua-ubuntu:latest" - }); - await computer.run(); // Launch & connect to the container - ``` - - - First, install and run `cua-computer-server`: - ```bash - pip install cua-computer-server - python -m computer_server - ``` - - Then, use the `Computer` object to connect: - ```typescript - import { Computer } from '@trycua/computer'; - - const computer = new Computer({ useHostComputerServer: true }); - await computer.run(); // Connect to the host desktop - ``` - - - - Once connected, you can perform interactions: - ```typescript - try { - // Take a screenshot of the computer's current display - const screenshot = await computer.interface.screenshot(); - // Simulate a left-click at coordinates (100, 100) - await computer.interface.leftClick(100, 100); - // Type "Hello!" into the active application - await computer.interface.typeText("Hello!"); - } finally { - await computer.close(); - } - ``` - - - - -Learn more about computers in the [Cua computers documentation](/computer-sdk/computers). You will see how to automate computers with agents in the next step. - - - - - -## Using Agent - -Utilize an Agent to automate complex tasks by providing it with a goal and allowing it to interact with the computer environment. - -Install the Cua agent Python SDK: - -```bash -pip install "cua-agent[all]" -``` - -Then, use the `ComputerAgent` object: - -```python -from agent import ComputerAgent - -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - max_trajectory_budget=5.0 -) - -messages = [{"role": "user", "content": "Take a screenshot and tell me what you see"}] - -async for result in agent.run(messages): - for item in result["output"]: - if item["type"] == "message": - print(item["content"][0]["text"]) -``` - -Learn more about agents in [Agent Loops](/agent-sdk/agent-loops) and available models in [Supported Models](/agent-sdk/supported-model-providers/). 
- - - - -## Next Steps - -- Learn more about [Cua computers](/computer-sdk/computers) and [computer commands](/computer-sdk/commands) -- Read about [Agent loops](/agent-sdk/agent-loops), [tools](/agent-sdk/custom-tools), and [supported model providers](/agent-sdk/supported-model-providers/) -- Join our [Discord community](https://discord.com/invite/mVnXXpdE85) for help -- Try out [Form Filling](/example-usecases/form-filling) preset usecase diff --git a/docs/next.config.mjs b/docs/next.config.mjs index 472674c4..fdb996c1 100644 --- a/docs/next.config.mjs +++ b/docs/next.config.mjs @@ -24,6 +24,39 @@ const config = { basePath: false, // Important: this bypasses the basePath permanent: false, }, + // Redirect old docs.cua.ai URLs to cua.ai/docs with 301 for SEO + // This handles URLs that Google has indexed from the old domain + { + source: '/:path*', + has: [ + { + type: 'host', + value: 'docs.cua.ai', + }, + ], + destination: 'https://cua.ai/docs/:path*', + permanent: true, // 301 redirect to preserve SEO authority + basePath: false, + }, + // Redirects for documentation restructure (PR #568) + // Moved quickstart-devs to get-started section + { + source: '/quickstart-devs', + destination: '/get-started/quickstart', + permanent: true, + }, + // Moved telemetry to agent-sdk section + { + source: '/telemetry', + destination: '/agent-sdk/telemetry', + permanent: true, + }, + // Removed quickstart-cli, consolidated into main quickstart + { + source: '/quickstart-cli', + destination: '/get-started/quickstart', + permanent: true, + }, ]; }, images: { diff --git a/docs/package.json b/docs/package.json index 2008d840..79f29c70 100644 --- a/docs/package.json +++ b/docs/package.json @@ -9,22 +9,22 @@ "postinstall": "fumadocs-mdx" }, "dependencies": { - "fumadocs-core": "15.5.1", - "fumadocs-mdx": "11.6.7", - "fumadocs-ui": "15.5.1", + "fumadocs-core": "16.0.8", + "fumadocs-mdx": "13.0.5", + "fumadocs-ui": "16.0.8", "lucide-react": "^0.525.0", "mermaid": "^11.8.1", - "next": "15.3.3", + "next": "16.0.1", "next-themes": "^0.4.6", "posthog-js": "^1.276.0", - "react": "^19.1.0", - "react-dom": "^19.1.0", + "react": "^19.2.0", + "react-dom": "^19.2.0", "react-icons": "^5.5.0", "remark": "^15.0.1", "remark-gfm": "^4.0.1", "remark-mdx": "^3.1.0", "tailwind-merge": "^3.3.1", - "zod": "^3.25.76" + "zod": "^4.1.12" }, "devDependencies": { "@tailwindcss/postcss": "^4.1.8", diff --git a/docs/pnpm-lock.yaml b/docs/pnpm-lock.yaml index aad925c4..03428696 100644 --- a/docs/pnpm-lock.yaml +++ b/docs/pnpm-lock.yaml @@ -9,38 +9,38 @@ importers: .: dependencies: fumadocs-core: - specifier: 15.5.1 - version: 15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + specifier: 16.0.8 + version: 16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0) fumadocs-mdx: - specifier: 11.6.7 - version: 11.6.7(acorn@8.15.0)(fumadocs-core@15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0)) + specifier: 13.0.5 + version: 13.0.5(fumadocs-core@16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0) fumadocs-ui: - specifier: 
15.5.1 - version: 15.5.1(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.10) + specifier: 16.0.8 + version: 16.0.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(tailwindcss@4.1.10) lucide-react: specifier: ^0.525.0 - version: 0.525.0(react@19.1.0) + version: 0.525.0(react@19.2.0) mermaid: specifier: ^11.8.1 version: 11.8.1 next: - specifier: 15.3.3 - version: 15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + specifier: 16.0.1 + version: 16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0) next-themes: specifier: ^0.4.6 - version: 0.4.6(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 0.4.6(react-dom@19.2.0(react@19.2.0))(react@19.2.0) posthog-js: specifier: ^1.276.0 version: 1.276.0 react: - specifier: ^19.1.0 - version: 19.1.0 + specifier: ^19.2.0 + version: 19.2.0 react-dom: - specifier: ^19.1.0 - version: 19.1.0(react@19.1.0) + specifier: ^19.2.0 + version: 19.2.0(react@19.2.0) react-icons: specifier: ^5.5.0 - version: 5.5.0(react@19.1.0) + version: 5.5.0(react@19.2.0) remark: specifier: ^15.0.1 version: 15.0.1 @@ -54,8 +54,8 @@ importers: specifier: ^3.3.1 version: 3.3.1 zod: - specifier: ^3.25.76 - version: 3.25.76 + specifier: ^4.1.12 + version: 4.1.12 devDependencies: '@tailwindcss/postcss': specifier: ^4.1.8 @@ -119,155 +119,161 @@ packages: '@chevrotain/utils@11.0.3': resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} - '@emnapi/runtime@1.4.3': - resolution: {integrity: sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ==} + '@emnapi/runtime@1.7.0': + resolution: {integrity: sha512-oAYoQnCYaQZKVS53Fq23ceWMRxq5EhQsE0x0RdQ55jT7wagMu5k+fS39v1fiSLrtrLQlXwVINenqhLMtTrV/1Q==} - '@esbuild/aix-ppc64@0.25.5': - resolution: {integrity: sha512-9o3TMmpmftaCMepOdA5k/yDw8SfInyzWWTjYTFCX3kPSDJMROQTb8jg+h9Cnwnmm1vOzvxN7gIfB5V2ewpjtGA==} + '@esbuild/aix-ppc64@0.25.12': + resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} engines: {node: '>=18'} cpu: [ppc64] os: [aix] - '@esbuild/android-arm64@0.25.5': - resolution: {integrity: sha512-VGzGhj4lJO+TVGV1v8ntCZWJktV7SGCs3Pn1GRWI1SBFtRALoomm8k5E9Pmwg3HOAal2VDc2F9+PM/rEY6oIDg==} + '@esbuild/android-arm64@0.25.12': + resolution: {integrity: sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} engines: {node: '>=18'} cpu: [arm64] os: [android] - '@esbuild/android-arm@0.25.5': - resolution: {integrity: sha512-AdJKSPeEHgi7/ZhuIPtcQKr5RQdo6OO2IL87JkianiMYMPbCtot9fxPbrMiBADOWWm3T2si9stAiVsGbTQFkbA==} + '@esbuild/android-arm@0.25.12': + resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} engines: {node: '>=18'} cpu: [arm] os: [android] - '@esbuild/android-x64@0.25.5': - resolution: {integrity: sha512-D2GyJT1kjvO//drbRT3Hib9XPwQeWd9vZoBJn+bu/lVsOZ13cqNdDeqIF/xQ5/VmWvMduP6AmXvylO/PIc2isw==} + '@esbuild/android-x64@0.25.12': + resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} engines: {node: '>=18'} cpu: [x64] os: [android] - '@esbuild/darwin-arm64@0.25.5': - resolution: {integrity: 
sha512-GtaBgammVvdF7aPIgH2jxMDdivezgFu6iKpmT+48+F8Hhg5J/sfnDieg0aeG/jfSvkYQU2/pceFPDKlqZzwnfQ==} + '@esbuild/darwin-arm64@0.25.12': + resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} engines: {node: '>=18'} cpu: [arm64] os: [darwin] - '@esbuild/darwin-x64@0.25.5': - resolution: {integrity: sha512-1iT4FVL0dJ76/q1wd7XDsXrSW+oLoquptvh4CLR4kITDtqi2e/xwXwdCVH8hVHU43wgJdsq7Gxuzcs6Iq/7bxQ==} + '@esbuild/darwin-x64@0.25.12': + resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} engines: {node: '>=18'} cpu: [x64] os: [darwin] - '@esbuild/freebsd-arm64@0.25.5': - resolution: {integrity: sha512-nk4tGP3JThz4La38Uy/gzyXtpkPW8zSAmoUhK9xKKXdBCzKODMc2adkB2+8om9BDYugz+uGV7sLmpTYzvmz6Sw==} + '@esbuild/freebsd-arm64@0.25.12': + resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-x64@0.25.5': - resolution: {integrity: sha512-PrikaNjiXdR2laW6OIjlbeuCPrPaAl0IwPIaRv+SMV8CiM8i2LqVUHFC1+8eORgWyY7yhQY+2U2fA55mBzReaw==} + '@esbuild/freebsd-x64@0.25.12': + resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] - '@esbuild/linux-arm64@0.25.5': - resolution: {integrity: sha512-Z9kfb1v6ZlGbWj8EJk9T6czVEjjq2ntSYLY2cw6pAZl4oKtfgQuS4HOq41M/BcoLPzrUbNd+R4BXFyH//nHxVg==} + '@esbuild/linux-arm64@0.25.12': + resolution: {integrity: sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} engines: {node: '>=18'} cpu: [arm64] os: [linux] - '@esbuild/linux-arm@0.25.5': - resolution: {integrity: sha512-cPzojwW2okgh7ZlRpcBEtsX7WBuqbLrNXqLU89GxWbNt6uIg78ET82qifUy3W6OVww6ZWobWub5oqZOVtwolfw==} + '@esbuild/linux-arm@0.25.12': + resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} engines: {node: '>=18'} cpu: [arm] os: [linux] - '@esbuild/linux-ia32@0.25.5': - resolution: {integrity: sha512-sQ7l00M8bSv36GLV95BVAdhJ2QsIbCuCjh/uYrWiMQSUuV+LpXwIqhgJDcvMTj+VsQmqAHL2yYaasENvJ7CDKA==} + '@esbuild/linux-ia32@0.25.12': + resolution: {integrity: sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} engines: {node: '>=18'} cpu: [ia32] os: [linux] - '@esbuild/linux-loong64@0.25.5': - resolution: {integrity: sha512-0ur7ae16hDUC4OL5iEnDb0tZHDxYmuQyhKhsPBV8f99f6Z9KQM02g33f93rNH5A30agMS46u2HP6qTdEt6Q1kg==} + '@esbuild/linux-loong64@0.25.12': + resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} engines: {node: '>=18'} cpu: [loong64] os: [linux] - '@esbuild/linux-mips64el@0.25.5': - resolution: {integrity: sha512-kB/66P1OsHO5zLz0i6X0RxlQ+3cu0mkxS3TKFvkb5lin6uwZ/ttOkP3Z8lfR9mJOBk14ZwZ9182SIIWFGNmqmg==} + '@esbuild/linux-mips64el@0.25.12': + resolution: {integrity: sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] - '@esbuild/linux-ppc64@0.25.5': - resolution: {integrity: sha512-UZCmJ7r9X2fe2D6jBmkLBMQetXPXIsZjQJCjgwpVDz+YMcS6oFR27alkgGv3Oqkv07bxdvw7fyB71/olceJhkQ==} + '@esbuild/linux-ppc64@0.25.12': + resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} engines: {node: '>=18'} cpu: 
[ppc64] os: [linux] - '@esbuild/linux-riscv64@0.25.5': - resolution: {integrity: sha512-kTxwu4mLyeOlsVIFPfQo+fQJAV9mh24xL+y+Bm6ej067sYANjyEw1dNHmvoqxJUCMnkBdKpvOn0Ahql6+4VyeA==} + '@esbuild/linux-riscv64@0.25.12': + resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] - '@esbuild/linux-s390x@0.25.5': - resolution: {integrity: sha512-K2dSKTKfmdh78uJ3NcWFiqyRrimfdinS5ErLSn3vluHNeHVnBAFWC8a4X5N+7FgVE1EjXS1QDZbpqZBjfrqMTQ==} + '@esbuild/linux-s390x@0.25.12': + resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} engines: {node: '>=18'} cpu: [s390x] os: [linux] - '@esbuild/linux-x64@0.25.5': - resolution: {integrity: sha512-uhj8N2obKTE6pSZ+aMUbqq+1nXxNjZIIjCjGLfsWvVpy7gKCOL6rsY1MhRh9zLtUtAI7vpgLMK6DxjO8Qm9lJw==} + '@esbuild/linux-x64@0.25.12': + resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} engines: {node: '>=18'} cpu: [x64] os: [linux] - '@esbuild/netbsd-arm64@0.25.5': - resolution: {integrity: sha512-pwHtMP9viAy1oHPvgxtOv+OkduK5ugofNTVDilIzBLpoWAM16r7b/mxBvfpuQDpRQFMfuVr5aLcn4yveGvBZvw==} + '@esbuild/netbsd-arm64@0.25.12': + resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] - '@esbuild/netbsd-x64@0.25.5': - resolution: {integrity: sha512-WOb5fKrvVTRMfWFNCroYWWklbnXH0Q5rZppjq0vQIdlsQKuw6mdSihwSo4RV/YdQ5UCKKvBy7/0ZZYLBZKIbwQ==} + '@esbuild/netbsd-x64@0.25.12': + resolution: {integrity: sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] - '@esbuild/openbsd-arm64@0.25.5': - resolution: {integrity: sha512-7A208+uQKgTxHd0G0uqZO8UjK2R0DDb4fDmERtARjSHWxqMTye4Erz4zZafx7Di9Cv+lNHYuncAkiGFySoD+Mw==} + '@esbuild/openbsd-arm64@0.25.12': + resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] - '@esbuild/openbsd-x64@0.25.5': - resolution: {integrity: sha512-G4hE405ErTWraiZ8UiSoesH8DaCsMm0Cay4fsFWOOUcz8b8rC6uCvnagr+gnioEjWn0wC+o1/TAHt+It+MpIMg==} + '@esbuild/openbsd-x64@0.25.12': + resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} engines: {node: '>=18'} cpu: [x64] os: [openbsd] - '@esbuild/sunos-x64@0.25.5': - resolution: {integrity: sha512-l+azKShMy7FxzY0Rj4RCt5VD/q8mG/e+mDivgspo+yL8zW7qEwctQ6YqKX34DTEleFAvCIUviCFX1SDZRSyMQA==} + '@esbuild/openharmony-arm64@0.25.12': + resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.25.12': + resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} engines: {node: '>=18'} cpu: [x64] os: [sunos] - '@esbuild/win32-arm64@0.25.5': - resolution: {integrity: sha512-O2S7SNZzdcFG7eFKgvwUEZ2VG9D/sn/eIiz8XRZ1Q/DO5a3s76Xv0mdBzVM5j5R639lXQmPmSo0iRpHqUUrsxw==} + '@esbuild/win32-arm64@0.25.12': + resolution: {integrity: sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} engines: {node: '>=18'} cpu: [arm64] os: [win32] - '@esbuild/win32-ia32@0.25.5': - resolution: 
{integrity: sha512-onOJ02pqs9h1iMJ1PQphR+VZv8qBMQ77Klcsqv9CNW2w6yLqoURLcgERAIurY6QE63bbLuqgP9ATqajFLK5AMQ==} + '@esbuild/win32-ia32@0.25.12': + resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} engines: {node: '>=18'} cpu: [ia32] os: [win32] - '@esbuild/win32-x64@0.25.5': - resolution: {integrity: sha512-TXv6YnJ8ZMVdX+SXWVBo/0p8LTcrUYngpWjvm91TMjjBQii7Oz11Lw5lbDV5Y0TzuhSJHwiH4hEtC1I42mMS0g==} + '@esbuild/win32-x64@0.25.12': + resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} engines: {node: '>=18'} cpu: [x64] os: [win32] @@ -287,8 +293,8 @@ packages: '@floating-ui/utils@0.2.9': resolution: {integrity: sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==} - '@formatjs/intl-localematcher@0.6.1': - resolution: {integrity: sha512-ePEgLgVCqi2BBFnTMWPfIghu6FkbZnnBVhO2sSxvLfrdFw7wCHAHiDoM2h4NRgjbaY7+B7HgOLZGkK187pZTZg==} + '@formatjs/intl-localematcher@0.6.2': + resolution: {integrity: sha512-XOMO2Hupl0wdd172Y06h6kLpBz6Dv+J4okPLl4LPtzbr8f66WbIoy4ev98EBuZ6ZK4h5ydTN6XneT4QVpD7cdA==} '@iconify/types@2.0.0': resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} @@ -296,118 +302,139 @@ packages: '@iconify/utils@2.3.0': resolution: {integrity: sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA==} - '@img/sharp-darwin-arm64@0.34.2': - resolution: {integrity: sha512-OfXHZPppddivUJnqyKoi5YVeHRkkNE2zUFT2gbpKxp/JZCFYEYubnMg+gOp6lWfasPrTS+KPosKqdI+ELYVDtg==} + '@img/colour@1.0.0': + resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==} + engines: {node: '>=18'} + + '@img/sharp-darwin-arm64@0.34.5': + resolution: {integrity: sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [darwin] - '@img/sharp-darwin-x64@0.34.2': - resolution: {integrity: sha512-dYvWqmjU9VxqXmjEtjmvHnGqF8GrVjM2Epj9rJ6BUIXvk8slvNDJbhGFvIoXzkDhrJC2jUxNLz/GUjjvSzfw+g==} + '@img/sharp-darwin-x64@0.34.5': + resolution: {integrity: sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [darwin] - '@img/sharp-libvips-darwin-arm64@1.1.0': - resolution: {integrity: sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==} + '@img/sharp-libvips-darwin-arm64@1.2.4': + resolution: {integrity: sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==} cpu: [arm64] os: [darwin] - '@img/sharp-libvips-darwin-x64@1.1.0': - resolution: {integrity: sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==} + '@img/sharp-libvips-darwin-x64@1.2.4': + resolution: {integrity: sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==} cpu: [x64] os: [darwin] - '@img/sharp-libvips-linux-arm64@1.1.0': - resolution: {integrity: sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==} + '@img/sharp-libvips-linux-arm64@1.2.4': + resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==} cpu: [arm64] os: [linux] - 
'@img/sharp-libvips-linux-arm@1.1.0': - resolution: {integrity: sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==} + '@img/sharp-libvips-linux-arm@1.2.4': + resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==} cpu: [arm] os: [linux] - '@img/sharp-libvips-linux-ppc64@1.1.0': - resolution: {integrity: sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==} + '@img/sharp-libvips-linux-ppc64@1.2.4': + resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==} cpu: [ppc64] os: [linux] - '@img/sharp-libvips-linux-s390x@1.1.0': - resolution: {integrity: sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==} + '@img/sharp-libvips-linux-riscv64@1.2.4': + resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==} + cpu: [riscv64] + os: [linux] + + '@img/sharp-libvips-linux-s390x@1.2.4': + resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==} cpu: [s390x] os: [linux] - '@img/sharp-libvips-linux-x64@1.1.0': - resolution: {integrity: sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==} + '@img/sharp-libvips-linux-x64@1.2.4': + resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==} cpu: [x64] os: [linux] - '@img/sharp-libvips-linuxmusl-arm64@1.1.0': - resolution: {integrity: sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==} + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': + resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==} cpu: [arm64] os: [linux] - '@img/sharp-libvips-linuxmusl-x64@1.1.0': - resolution: {integrity: sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==} + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==} cpu: [x64] os: [linux] - '@img/sharp-linux-arm64@0.34.2': - resolution: {integrity: sha512-D8n8wgWmPDakc83LORcfJepdOSN6MvWNzzz2ux0MnIbOqdieRZwVYY32zxVx+IFUT8er5KPcyU3XXsn+GzG/0Q==} + '@img/sharp-linux-arm64@0.34.5': + resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] - '@img/sharp-linux-arm@0.34.2': - resolution: {integrity: sha512-0DZzkvuEOqQUP9mo2kjjKNok5AmnOr1jB2XYjkaoNRwpAYMDzRmAqUIa1nRi58S2WswqSfPOWLNOr0FDT3H5RQ==} + '@img/sharp-linux-arm@0.34.5': + resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] - '@img/sharp-linux-s390x@0.34.2': - resolution: {integrity: sha512-EGZ1xwhBI7dNISwxjChqBGELCWMGDvmxZXKjQRuqMrakhO8QoMgqCrdjnAqJq/CScxfRn+Bb7suXBElKQpPDiw==} + '@img/sharp-linux-ppc64@0.34.5': + resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ppc64] + os: [linux] + + '@img/sharp-linux-riscv64@0.34.5': + resolution: 
{integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [riscv64] + os: [linux] + + '@img/sharp-linux-s390x@0.34.5': + resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] - '@img/sharp-linux-x64@0.34.2': - resolution: {integrity: sha512-sD7J+h5nFLMMmOXYH4DD9UtSNBD05tWSSdWAcEyzqW8Cn5UxXvsHAxmxSesYUsTOBmUnjtxghKDl15EvfqLFbQ==} + '@img/sharp-linux-x64@0.34.5': + resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] - '@img/sharp-linuxmusl-arm64@0.34.2': - resolution: {integrity: sha512-NEE2vQ6wcxYav1/A22OOxoSOGiKnNmDzCYFOZ949xFmrWZOVII1Bp3NqVVpvj+3UeHMFyN5eP/V5hzViQ5CZNA==} + '@img/sharp-linuxmusl-arm64@0.34.5': + resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] - '@img/sharp-linuxmusl-x64@0.34.2': - resolution: {integrity: sha512-DOYMrDm5E6/8bm/yQLCWyuDJwUnlevR8xtF8bs+gjZ7cyUNYXiSf/E8Kp0Ss5xasIaXSHzb888V1BE4i1hFhAA==} + '@img/sharp-linuxmusl-x64@0.34.5': + resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] - '@img/sharp-wasm32@0.34.2': - resolution: {integrity: sha512-/VI4mdlJ9zkaq53MbIG6rZY+QRN3MLbR6usYlgITEzi4Rpx5S6LFKsycOQjkOGmqTNmkIdLjEvooFKwww6OpdQ==} + '@img/sharp-wasm32@0.34.5': + resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [wasm32] - '@img/sharp-win32-arm64@0.34.2': - resolution: {integrity: sha512-cfP/r9FdS63VA5k0xiqaNaEoGxBg9k7uE+RQGzuK9fHt7jib4zAVVseR9LsE4gJcNWgT6APKMNnCcnyOtmSEUQ==} + '@img/sharp-win32-arm64@0.34.5': + resolution: {integrity: sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [win32] - '@img/sharp-win32-ia32@0.34.2': - resolution: {integrity: sha512-QLjGGvAbj0X/FXl8n1WbtQ6iVBpWU7JO94u/P2M4a8CFYsvQi4GW2mRy/JqkRx0qpBzaOdKJKw8uc930EX2AHw==} + '@img/sharp-win32-ia32@0.34.5': + resolution: {integrity: sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [ia32] os: [win32] - '@img/sharp-win32-x64@0.34.2': - resolution: {integrity: sha512-aUdT6zEYtDKCaxkofmmJDJYGCf0+pJg3eU9/oBuqvEeoB9dKI6ZLc/1iLJCTuJQDO4ptntAlkUmHgGjyuobZbw==} + '@img/sharp-win32-x64@0.34.5': + resolution: {integrity: sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [win32] @@ -434,65 +461,65 @@ packages: '@jridgewell/trace-mapping@0.3.25': resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} - '@mdx-js/mdx@3.1.0': - resolution: {integrity: sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw==} + '@mdx-js/mdx@3.1.1': + resolution: {integrity: 
sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==} '@mermaid-js/parser@0.6.1': resolution: {integrity: sha512-lCQNpV8R4lgsGcjX5667UiuDLk2micCtjtxR1YKbBXvN5w2v+FeLYoHrTSSrjwXdMcDYvE4ZBPvKT31dfeSmmA==} - '@next/env@15.3.3': - resolution: {integrity: sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw==} + '@next/env@16.0.1': + resolution: {integrity: sha512-LFvlK0TG2L3fEOX77OC35KowL8D7DlFF45C0OvKMC4hy8c/md1RC4UMNDlUGJqfCoCS2VWrZ4dSE6OjaX5+8mw==} - '@next/swc-darwin-arm64@15.3.3': - resolution: {integrity: sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==} + '@next/swc-darwin-arm64@16.0.1': + resolution: {integrity: sha512-R0YxRp6/4W7yG1nKbfu41bp3d96a0EalonQXiMe+1H9GTHfKxGNCGFNWUho18avRBPsO8T3RmdWuzmfurlQPbg==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@15.3.3': - resolution: {integrity: sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==} + '@next/swc-darwin-x64@16.0.1': + resolution: {integrity: sha512-kETZBocRux3xITiZtOtVoVvXyQLB7VBxN7L6EPqgI5paZiUlnsgYv4q8diTNYeHmF9EiehydOBo20lTttCbHAg==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@15.3.3': - resolution: {integrity: sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==} + '@next/swc-linux-arm64-gnu@16.0.1': + resolution: {integrity: sha512-hWg3BtsxQuSKhfe0LunJoqxjO4NEpBmKkE+P2Sroos7yB//OOX3jD5ISP2wv8QdUwtRehMdwYz6VB50mY6hqAg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@15.3.3': - resolution: {integrity: sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==} + '@next/swc-linux-arm64-musl@16.0.1': + resolution: {integrity: sha512-UPnOvYg+fjAhP3b1iQStcYPWeBFRLrugEyK/lDKGk7kLNua8t5/DvDbAEFotfV1YfcOY6bru76qN9qnjLoyHCQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-x64-gnu@15.3.3': - resolution: {integrity: sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==} + '@next/swc-linux-x64-gnu@16.0.1': + resolution: {integrity: sha512-Et81SdWkcRqAJziIgFtsFyJizHoWne4fzJkvjd6V4wEkWTB4MX6J0uByUb0peiJQ4WeAt6GGmMszE5KrXK6WKg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@15.3.3': - resolution: {integrity: sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==} + '@next/swc-linux-x64-musl@16.0.1': + resolution: {integrity: sha512-qBbgYEBRrC1egcG03FZaVfVxrJm8wBl7vr8UFKplnxNRprctdP26xEv9nJ07Ggq4y1adwa0nz2mz83CELY7N6Q==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-win32-arm64-msvc@15.3.3': - resolution: {integrity: sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==} + '@next/swc-win32-arm64-msvc@16.0.1': + resolution: {integrity: sha512-cPuBjYP6I699/RdbHJonb3BiRNEDm5CKEBuJ6SD8k3oLam2fDRMKAvmrli4QMDgT2ixyRJ0+DTkiODbIQhRkeQ==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-x64-msvc@15.3.3': - resolution: {integrity: sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==} + '@next/swc-win32-x64-msvc@16.0.1': + resolution: {integrity: sha512-XeEUJsE4JYtfrXe/LaJn3z1pD19fK0Q6Er8Qoufi+HqvdO4LEPyCxLUt4rxA+4RfYo6S9gMlmzCMU2F+AatFqQ==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@orama/orama@3.1.7': - resolution: {integrity: 
sha512-6yB0117ZjsgNevZw3LP+bkrZa9mU/POPVaXgzMPOBbBc35w2P3R+1vMMhEfC06kYCpd5bf0jodBaTkYQW5TVeQ==} + '@orama/orama@3.1.16': + resolution: {integrity: sha512-scSmQBD8eANlMUOglxHrN1JdSW8tDghsPuS83otqealBiIeMukCQMOf/wc0JJjDXomqwNdEQFLXLGHrU6PGxuA==} engines: {node: '>= 20.0.0'} '@posthog/core@1.3.0': @@ -501,11 +528,11 @@ packages: '@radix-ui/number@1.1.1': resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==} - '@radix-ui/primitive@1.1.2': - resolution: {integrity: sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA==} + '@radix-ui/primitive@1.1.3': + resolution: {integrity: sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==} - '@radix-ui/react-accordion@1.2.11': - resolution: {integrity: sha512-l3W5D54emV2ues7jjeG1xcyN7S3jnK3zE2zHqgn0CmMsy9lNJwmgcrmaxS+7ipw15FAivzKNzH3d5EcGoFKw0A==} + '@radix-ui/react-accordion@1.2.12': + resolution: {integrity: sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -530,8 +557,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-collapsible@1.1.11': - resolution: {integrity: sha512-2qrRsVGSCYasSz1RFOorXwl0H7g7J1frQtgpQgYrt+MOidtPAINHn9CPovQXb83r8ahapdx3Tu0fa/pdFFSdPg==} + '@radix-ui/react-collapsible@1.1.12': + resolution: {integrity: sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -574,8 +601,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dialog@1.1.14': - resolution: {integrity: sha512-+CpweKjqpzTmwRwcYECQcNYbI8V9VSQt0SNFKeEBLgfucbsLssU6Ppq7wUdNXEGb573bMjFhVjKVll8rmV6zMw==} + '@radix-ui/react-dialog@1.1.15': + resolution: {integrity: sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -596,8 +623,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dismissable-layer@1.1.10': - resolution: {integrity: sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ==} + '@radix-ui/react-dismissable-layer@1.1.11': + resolution: {integrity: sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -609,8 +636,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-focus-guards@1.1.2': - resolution: {integrity: sha512-fyjAACV62oPV925xFCrH8DR5xWhg9KYtJT4s3u54jxp+L/hbpTY2kIeEFFbFe+a/HCE94zGQMZLIpVTPVZDhaA==} + '@radix-ui/react-focus-guards@1.1.3': + resolution: {integrity: sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==} peerDependencies: '@types/react': '*' react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc @@ -640,8 +667,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-navigation-menu@1.2.13': - resolution: {integrity: sha512-WG8wWfDiJlSF5hELjwfjSGOXcBR/ZMhBFCGYe8vERpC39CQYZeq1PQ2kaYHdye3V95d06H89KGMsVCIE4LWo3g==} + '@radix-ui/react-navigation-menu@1.2.14': + resolution: {integrity: sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -653,8 +680,8 @@ packages: '@types/react-dom': optional: true - 
'@radix-ui/react-popover@1.1.14': - resolution: {integrity: sha512-ODz16+1iIbGUfFEfKx2HTPKizg2MN39uIOV8MXeHnmdd3i/N9Wt7vU46wbHsqA0xoaQyXVcs0KIlBdOA2Y95bw==} + '@radix-ui/react-popover@1.1.15': + resolution: {integrity: sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -666,8 +693,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-popper@1.2.7': - resolution: {integrity: sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ==} + '@radix-ui/react-popper@1.2.8': + resolution: {integrity: sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -692,8 +719,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-presence@1.1.4': - resolution: {integrity: sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA==} + '@radix-ui/react-presence@1.1.5': + resolution: {integrity: sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -718,8 +745,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-roving-focus@1.1.10': - resolution: {integrity: sha512-dT9aOXUen9JSsxnMPv/0VqySQf5eDQ6LCk5Sw28kamz8wSOW2bJdlX2Bg5VUIIcV+6XlHpWTIuTPCf/UNIyq8Q==} + '@radix-ui/react-roving-focus@1.1.11': + resolution: {integrity: sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -731,8 +758,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-scroll-area@1.2.9': - resolution: {integrity: sha512-YSjEfBXnhUELsO2VzjdtYYD4CfQjvao+lhhrX5XsHD7/cyUNzljF1FHEbgTPN7LH2MClfwRMIsYlqTYpKTTe2A==} + '@radix-ui/react-scroll-area@1.2.10': + resolution: {integrity: sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -753,8 +780,17 @@ packages: '@types/react': optional: true - '@radix-ui/react-tabs@1.1.12': - resolution: {integrity: sha512-GTVAlRVrQrSw3cEARM0nAx73ixrWDPNZAruETn3oHCNP6SbZ/hNxdxp+u7VkIEv3/sFoLq1PfcHrl7Pnp0CDpw==} + '@radix-ui/react-slot@1.2.4': + resolution: {integrity: sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-tabs@1.1.13': + resolution: {integrity: sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -854,29 +890,29 @@ packages: '@radix-ui/rect@1.1.1': resolution: {integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==} - '@shikijs/core@3.7.0': - resolution: {integrity: sha512-yilc0S9HvTPyahHpcum8eonYrQtmGTU0lbtwxhA6jHv4Bm1cAdlPFRCJX4AHebkCm75aKTjjRAW+DezqD1b/cg==} + '@shikijs/core@3.15.0': + resolution: {integrity: sha512-8TOG6yG557q+fMsSVa8nkEDOZNTSxjbbR8l6lF2gyr6Np+jrPlslqDxQkN6rMXCECQ3isNPZAGszAfYoJOPGlg==} - '@shikijs/engine-javascript@3.7.0': - resolution: {integrity: 
sha512-0t17s03Cbv+ZcUvv+y33GtX75WBLQELgNdVghnsdhTgU3hVcWcMsoP6Lb0nDTl95ZJfbP1mVMO0p3byVh3uuzA==} + '@shikijs/engine-javascript@3.15.0': + resolution: {integrity: sha512-ZedbOFpopibdLmvTz2sJPJgns8Xvyabe2QbmqMTz07kt1pTzfEvKZc5IqPVO/XFiEbbNyaOpjPBkkr1vlwS+qg==} - '@shikijs/engine-oniguruma@3.7.0': - resolution: {integrity: sha512-5BxcD6LjVWsGu4xyaBC5bu8LdNgPCVBnAkWTtOCs/CZxcB22L8rcoWfv7Hh/3WooVjBZmFtyxhgvkQFedPGnFw==} + '@shikijs/engine-oniguruma@3.15.0': + resolution: {integrity: sha512-HnqFsV11skAHvOArMZdLBZZApRSYS4LSztk2K3016Y9VCyZISnlYUYsL2hzlS7tPqKHvNqmI5JSUJZprXloMvA==} - '@shikijs/langs@3.7.0': - resolution: {integrity: sha512-1zYtdfXLr9xDKLTGy5kb7O0zDQsxXiIsw1iIBcNOO8Yi5/Y1qDbJ+0VsFoqTlzdmneO8Ij35g7QKF8kcLyznCQ==} + '@shikijs/langs@3.15.0': + resolution: {integrity: sha512-WpRvEFvkVvO65uKYW4Rzxs+IG0gToyM8SARQMtGGsH4GDMNZrr60qdggXrFOsdfOVssG/QQGEl3FnJ3EZ+8w8A==} - '@shikijs/rehype@3.7.0': - resolution: {integrity: sha512-YjAZxhQnBXE8ehppKGzuVGPoE4pjVsxqzkWhBZlkP495AjlR++MgfiRFcQfDt3qX5lK3gEDTcghB/8E3yNrWqQ==} + '@shikijs/rehype@3.15.0': + resolution: {integrity: sha512-U+tqD1oxL+85N8FaW5XYIlMZ8KAa2g9IdplEZxPWflGRJf2gQRiBMMrpdG1USz3PN350YnMUHWcz9Twt3wJjXQ==} - '@shikijs/themes@3.7.0': - resolution: {integrity: sha512-VJx8497iZPy5zLiiCTSIaOChIcKQwR0FebwE9S3rcN0+J/GTWwQ1v/bqhTbpbY3zybPKeO8wdammqkpXc4NVjQ==} + '@shikijs/themes@3.15.0': + resolution: {integrity: sha512-8ow2zWb1IDvCKjYb0KiLNrK4offFdkfNVPXb1OZykpLCzRU6j+efkY+Y7VQjNlNFXonSw+4AOdGYtmqykDbRiQ==} - '@shikijs/transformers@3.7.0': - resolution: {integrity: sha512-VplaqIMRNsNOorCXJHkbF5S0pT6xm8Z/s7w7OPZLohf8tR93XH0krvUafpNy/ozEylrWuShJF0+ftEB+wFRwGA==} + '@shikijs/transformers@3.15.0': + resolution: {integrity: sha512-Hmwip5ovvSkg+Kc41JTvSHHVfCYF+C8Cp1omb5AJj4Xvd+y9IXz2rKJwmFRGsuN0vpHxywcXJ1+Y4B9S7EG1/A==} - '@shikijs/types@3.7.0': - resolution: {integrity: sha512-MGaLeaRlSWpnP0XSAum3kP3a8vtcTsITqoEPYdt3lQG3YCdQH4DnEhodkYcNMcU0uW0RffhoD1O3e0vG5eSBBg==} + '@shikijs/types@3.15.0': + resolution: {integrity: sha512-BnP+y/EQnhihgHy4oIAN+6FFtmfTekwOLsQbRw9hOKwqgNy8Bdsjq8B05oAt/ZgvIWWFrshV71ytOrlPfYjIJw==} '@shikijs/vscode-textmate@10.0.2': resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} @@ -884,9 +920,6 @@ packages: '@standard-schema/spec@1.0.0': resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==} - '@swc/counter@0.1.3': - resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} - '@swc/helpers@0.5.15': resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} @@ -1128,9 +1161,6 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - argparse@1.0.10: - resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} - argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} @@ -1145,10 +1175,6 @@ packages: bail@2.0.2: resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} - busboy@1.6.0: - resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} - engines: {node: '>=10.16.0'} - caniuse-lite@1.0.30001724: resolution: {integrity: 
sha512-WqJo7p0TbHDOythNTqYujmaJTvtYRZrjpP8TCvH6Vb9CYJerJNKamKzIWOM4BkQatWj9H2lYulpdAQNBe7QhNA==} @@ -1196,20 +1222,6 @@ packages: collapse-white-space@2.1.0: resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==} - color-convert@2.0.1: - resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} - engines: {node: '>=7.0.0'} - - color-name@1.1.4: - resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} - - color-string@1.9.1: - resolution: {integrity: sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==} - - color@4.2.3: - resolution: {integrity: sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==} - engines: {node: '>=12.5.0'} - comma-separated-tokens@2.0.3: resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} @@ -1429,6 +1441,10 @@ packages: resolution: {integrity: sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==} engines: {node: '>=8'} + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + detect-node-es@1.1.0: resolution: {integrity: sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==} @@ -1448,8 +1464,8 @@ packages: esast-util-from-js@2.0.1: resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==} - esbuild@0.25.5: - resolution: {integrity: sha512-P8OtKZRv/5J5hhz0cUAdu/cLuPIKXpQl1R9pZtvmHWQvrAUVd0UNIPT4IB4W3rNOqVO0rlqHmCIbSwxh/c9yUQ==} + esbuild@0.25.12: + resolution: {integrity: sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==} engines: {node: '>=18'} hasBin: true @@ -1457,11 +1473,6 @@ packages: resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} engines: {node: '>=12'} - esprima@4.0.1: - resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} - engines: {node: '>=4'} - hasBin: true - estree-util-attach-comments@3.0.0: resolution: {integrity: sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==} @@ -1477,8 +1488,8 @@ packages: estree-util-to-js@2.0.0: resolution: {integrity: sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==} - estree-util-value-to-estree@3.4.0: - resolution: {integrity: sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==} + estree-util-value-to-estree@3.5.0: + resolution: {integrity: sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==} estree-util-visit@2.0.0: resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==} @@ -1489,15 +1500,12 @@ packages: exsolve@1.0.7: resolution: {integrity: sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==} - extend-shallow@2.0.1: - resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} - engines: {node: 
'>=0.10.0'} - extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} - fdir@6.4.6: - resolution: {integrity: sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w==} + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: @@ -1507,45 +1515,76 @@ packages: fflate@0.4.8: resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} - fumadocs-core@15.5.1: - resolution: {integrity: sha512-5eJPJw+BFWFdgrtWPQ9aAZAhhsyuZAwth8OjBd9R77sXoIoae4Y4lJZMq3BeSpJZcuIAOVbSCS+pJhsBAoXJ8g==} + fumadocs-core@16.0.8: + resolution: {integrity: sha512-nWWwEfcHxn4tmx/knvDlbRUUkbGc6ChNEpLymV5cmQCSAazG0FkgksF5aOxoDq1wPG0THoEMBVD320spe4QWHw==} peerDependencies: - '@oramacloud/client': 1.x.x || 2.x.x + '@mixedbread/sdk': ^0.19.0 + '@orama/core': 1.x.x + '@tanstack/react-router': 1.x.x + '@types/react': '*' algoliasearch: 5.x.x - next: 14.x.x || 15.x.x - react: 18.x.x || 19.x.x - react-dom: 18.x.x || 19.x.x + lucide-react: '*' + next: 16.x.x + react: ^19.2.0 + react-dom: ^19.2.0 + react-router: 7.x.x + waku: ^0.26.0 peerDependenciesMeta: - '@oramacloud/client': + '@mixedbread/sdk': + optional: true + '@orama/core': + optional: true + '@tanstack/react-router': + optional: true + '@types/react': optional: true algoliasearch: optional: true + lucide-react: + optional: true next: optional: true react: optional: true react-dom: optional: true + react-router: + optional: true + waku: + optional: true - fumadocs-mdx@11.6.7: - resolution: {integrity: sha512-jOZzxowvhwe9RzV6jVjIS2FsQIz9P6QYkMBPgR0nq9+7trP+mmiLoIq5EwhTPrR/Y/4gTiSl9TXFWxTY02trnw==} + fumadocs-mdx@13.0.5: + resolution: {integrity: sha512-ERhPxQzoTwEdtuel5dN5OmUItOhGGXTLR1uCjiGPABYeVkc57vAexyTRQSYZMxGlcfjkJaYqt3qY1p5j7i4g7A==} hasBin: true peerDependencies: - '@fumadocs/mdx-remote': ^1.2.0 - fumadocs-core: ^14.0.0 || ^15.0.0 - next: ^15.3.0 + '@fumadocs/mdx-remote': ^1.4.0 + fumadocs-core: ^15.0.0 || ^16.0.0 + next: ^15.3.0 || ^16.0.0 + react: '*' + vite: 6.x.x || 7.x.x peerDependenciesMeta: '@fumadocs/mdx-remote': optional: true + next: + optional: true + react: + optional: true + vite: + optional: true - fumadocs-ui@15.5.1: - resolution: {integrity: sha512-HyMoM+mv5WZrXDAv88SLLqFrduDSxQHFU+uQkSpJQdycaGNSIB8063PW/wb/QIliusWP8o+c/YLFy/29KymEWA==} + fumadocs-ui@16.0.8: + resolution: {integrity: sha512-NyqAiYJnseXYy6ah/rI67Luy5mssSOwOMv03Xy2SHaDrH588Xjtbx84DwnaJXDlu2L/evHMJ+Bvt5/WIdBQbWQ==} peerDependencies: - next: 14.x.x || 15.x.x - react: 18.x.x || 19.x.x - react-dom: 18.x.x || 19.x.x - tailwindcss: ^3.4.14 || ^4.0.0 + '@types/react': '*' + next: 16.x.x + react: ^19.2.0 + react-dom: ^19.2.0 + tailwindcss: ^4.0.0 peerDependenciesMeta: + '@types/react': + optional: true + next: + optional: true tailwindcss: optional: true @@ -1563,10 +1602,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - gray-matter@4.0.3: - resolution: {integrity: sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==} - engines: {node: '>=6.0'} - hachure-fill@0.5.2: resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} @@ -1613,16 +1648,9 
@@ packages: is-alphanumerical@2.0.1: resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} - is-arrayish@0.3.2: - resolution: {integrity: sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==} - is-decimal@2.0.1: resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} - is-extendable@0.1.1: - resolution: {integrity: sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==} - engines: {node: '>=0.10.0'} - is-hexadecimal@2.0.1: resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} @@ -1634,10 +1662,6 @@ packages: resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} hasBin: true - js-yaml@3.14.1: - resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} - hasBin: true - js-yaml@4.1.0: resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} hasBin: true @@ -1649,10 +1673,6 @@ packages: khroma@2.1.0: resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} - kind-of@6.0.3: - resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} - engines: {node: '>=0.10.0'} - kolorist@1.8.0: resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} @@ -1743,8 +1763,8 @@ packages: longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} - lru-cache@11.1.0: - resolution: {integrity: sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==} + lru-cache@11.2.2: + resolution: {integrity: sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg==} engines: {node: 20 || >=22} lucide-react@0.525.0: @@ -1957,13 +1977,13 @@ packages: react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc - next@15.3.3: - resolution: {integrity: sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==} - engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + next@16.0.1: + resolution: {integrity: sha512-e9RLSssZwd35p7/vOa+hoDFggUZIUbZhIUSLZuETCwrCVvxOs87NamoUzT+vbcNAL8Ld9GobBnWOA6SbV/arOw==} + engines: {node: '>=20.9.0'} hasBin: true peerDependencies: '@opentelemetry/api': ^1.1.0 - '@playwright/test': ^1.41.2 + '@playwright/test': ^1.51.1 babel-plugin-react-compiler: '*' react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 @@ -1978,6 +1998,10 @@ packages: sass: optional: true + npm-to-yarn@3.0.1: + resolution: {integrity: sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + oniguruma-parser@0.12.1: resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} @@ -1993,14 +2017,17 @@ packages: path-data-parser@0.1.0: resolution: {integrity: 
sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + path-to-regexp@8.3.0: + resolution: {integrity: sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==} + pathe@2.0.3: resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} - picomatch@4.0.2: - resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} pkg-types@1.3.1: @@ -2052,18 +2079,18 @@ packages: quansync@0.2.10: resolution: {integrity: sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==} - react-dom@19.1.0: - resolution: {integrity: sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==} + react-dom@19.2.0: + resolution: {integrity: sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==} peerDependencies: - react: ^19.1.0 + react: ^19.2.0 react-icons@5.5.0: resolution: {integrity: sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==} peerDependencies: react: '*' - react-medium-image-zoom@5.2.14: - resolution: {integrity: sha512-nfTVYcAUnBzXQpPDcZL+cG/e6UceYUIG+zDcnemL7jtAqbJjVVkA85RgneGtJeni12dTyiRPZVM6Szkmwd/o8w==} + react-medium-image-zoom@5.4.0: + resolution: {integrity: sha512-BsE+EnFVQzFIlyuuQrZ9iTwyKpKkqdFZV1ImEQN573QPqGrIUuNni7aF+sZwDcxlsuOMayCr6oO/PZR/yJnbRg==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 @@ -2098,8 +2125,8 @@ packages: '@types/react': optional: true - react@19.1.0: - resolution: {integrity: sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==} + react@19.2.0: + resolution: {integrity: sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==} engines: {node: '>=0.10.0'} readdirp@4.1.2: @@ -2136,6 +2163,9 @@ packages: remark-mdx@3.1.0: resolution: {integrity: sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA==} + remark-mdx@3.1.1: + resolution: {integrity: sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==} + remark-parse@11.0.0: resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==} @@ -2160,30 +2190,23 @@ packages: safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - scheduler@0.26.0: - resolution: {integrity: sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==} + scheduler@0.27.0: + resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==} scroll-into-view-if-needed@3.1.0: resolution: {integrity: sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==} - section-matter@1.0.0: - resolution: {integrity: 
sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==} - engines: {node: '>=4'} - - semver@7.7.2: - resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} engines: {node: '>=10'} hasBin: true - sharp@0.34.2: - resolution: {integrity: sha512-lszvBmB9QURERtyKT2bNmsgxXK0ShJrL/fvqlonCo7e6xBF8nT8xU6pW+PMIbLsz0RxQk3rgH9kd8UmvOzlMJg==} + sharp@0.34.5: + resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} - shiki@3.7.0: - resolution: {integrity: sha512-ZcI4UT9n6N2pDuM2n3Jbk0sR4Swzq43nLPgS/4h0E3B/NrFn2HKElrDtceSf8Zx/OWYOo7G1SAtBLypCp+YXqg==} - - simple-swizzle@0.2.2: - resolution: {integrity: sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==} + shiki@3.15.0: + resolution: {integrity: sha512-kLdkY6iV3dYbtPwS9KXU7mjfmDm25f5m0IPNFnaXO7TBPcvbUOY72PYXSuSqDzwp+vlH/d7MXpHlKO/x+QoLXw==} source-map-js@1.2.1: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} @@ -2196,20 +2219,9 @@ packages: space-separated-tokens@2.0.2: resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} - sprintf-js@1.0.3: - resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - - streamsearch@1.1.0: - resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} - engines: {node: '>=10.0.0'} - stringify-entities@4.0.4: resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} - strip-bom-string@1.0.0: - resolution: {integrity: sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==} - engines: {node: '>=0.10.0'} - style-to-js@1.1.17: resolution: {integrity: sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA==} @@ -2249,8 +2261,8 @@ packages: tinyexec@1.0.1: resolution: {integrity: sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==} - tinyglobby@0.2.14: - resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==} + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} trim-lines@3.0.1: @@ -2289,6 +2301,9 @@ packages: unist-util-position@5.0.0: resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==} + unist-util-remove-position@5.0.0: + resolution: {integrity: sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==} + unist-util-stringify-position@4.0.0: resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} @@ -2358,8 +2373,8 @@ packages: resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} engines: {node: '>=18'} - zod@3.25.76: - resolution: {integrity: 
sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} + zod@4.1.12: + resolution: {integrity: sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==} zwitch@2.0.4: resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} @@ -2399,84 +2414,87 @@ snapshots: '@chevrotain/utils@11.0.3': {} - '@emnapi/runtime@1.4.3': + '@emnapi/runtime@1.7.0': dependencies: tslib: 2.8.1 optional: true - '@esbuild/aix-ppc64@0.25.5': + '@esbuild/aix-ppc64@0.25.12': optional: true - '@esbuild/android-arm64@0.25.5': + '@esbuild/android-arm64@0.25.12': optional: true - '@esbuild/android-arm@0.25.5': + '@esbuild/android-arm@0.25.12': optional: true - '@esbuild/android-x64@0.25.5': + '@esbuild/android-x64@0.25.12': optional: true - '@esbuild/darwin-arm64@0.25.5': + '@esbuild/darwin-arm64@0.25.12': optional: true - '@esbuild/darwin-x64@0.25.5': + '@esbuild/darwin-x64@0.25.12': optional: true - '@esbuild/freebsd-arm64@0.25.5': + '@esbuild/freebsd-arm64@0.25.12': optional: true - '@esbuild/freebsd-x64@0.25.5': + '@esbuild/freebsd-x64@0.25.12': optional: true - '@esbuild/linux-arm64@0.25.5': + '@esbuild/linux-arm64@0.25.12': optional: true - '@esbuild/linux-arm@0.25.5': + '@esbuild/linux-arm@0.25.12': optional: true - '@esbuild/linux-ia32@0.25.5': + '@esbuild/linux-ia32@0.25.12': optional: true - '@esbuild/linux-loong64@0.25.5': + '@esbuild/linux-loong64@0.25.12': optional: true - '@esbuild/linux-mips64el@0.25.5': + '@esbuild/linux-mips64el@0.25.12': optional: true - '@esbuild/linux-ppc64@0.25.5': + '@esbuild/linux-ppc64@0.25.12': optional: true - '@esbuild/linux-riscv64@0.25.5': + '@esbuild/linux-riscv64@0.25.12': optional: true - '@esbuild/linux-s390x@0.25.5': + '@esbuild/linux-s390x@0.25.12': optional: true - '@esbuild/linux-x64@0.25.5': + '@esbuild/linux-x64@0.25.12': optional: true - '@esbuild/netbsd-arm64@0.25.5': + '@esbuild/netbsd-arm64@0.25.12': optional: true - '@esbuild/netbsd-x64@0.25.5': + '@esbuild/netbsd-x64@0.25.12': optional: true - '@esbuild/openbsd-arm64@0.25.5': + '@esbuild/openbsd-arm64@0.25.12': optional: true - '@esbuild/openbsd-x64@0.25.5': + '@esbuild/openbsd-x64@0.25.12': optional: true - '@esbuild/sunos-x64@0.25.5': + '@esbuild/openharmony-arm64@0.25.12': optional: true - '@esbuild/win32-arm64@0.25.5': + '@esbuild/sunos-x64@0.25.12': optional: true - '@esbuild/win32-ia32@0.25.5': + '@esbuild/win32-arm64@0.25.12': optional: true - '@esbuild/win32-x64@0.25.5': + '@esbuild/win32-ia32@0.25.12': + optional: true + + '@esbuild/win32-x64@0.25.12': optional: true '@floating-ui/core@1.7.1': @@ -2488,15 +2506,15 @@ snapshots: '@floating-ui/core': 1.7.1 '@floating-ui/utils': 0.2.9 - '@floating-ui/react-dom@2.1.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@floating-ui/react-dom@2.1.3(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: '@floating-ui/dom': 1.7.1 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) '@floating-ui/utils@0.2.9': {} - '@formatjs/intl-localematcher@0.6.1': + '@formatjs/intl-localematcher@0.6.2': dependencies: tslib: 2.8.1 @@ -2515,85 +2533,101 @@ snapshots: transitivePeerDependencies: - supports-color - '@img/sharp-darwin-arm64@0.34.2': + '@img/colour@1.0.0': + optional: true + + '@img/sharp-darwin-arm64@0.34.5': optionalDependencies: - '@img/sharp-libvips-darwin-arm64': 1.1.0 + '@img/sharp-libvips-darwin-arm64': 1.2.4 optional: true - '@img/sharp-darwin-x64@0.34.2': + 
'@img/sharp-darwin-x64@0.34.5': optionalDependencies: - '@img/sharp-libvips-darwin-x64': 1.1.0 + '@img/sharp-libvips-darwin-x64': 1.2.4 optional: true - '@img/sharp-libvips-darwin-arm64@1.1.0': + '@img/sharp-libvips-darwin-arm64@1.2.4': optional: true - '@img/sharp-libvips-darwin-x64@1.1.0': + '@img/sharp-libvips-darwin-x64@1.2.4': optional: true - '@img/sharp-libvips-linux-arm64@1.1.0': + '@img/sharp-libvips-linux-arm64@1.2.4': optional: true - '@img/sharp-libvips-linux-arm@1.1.0': + '@img/sharp-libvips-linux-arm@1.2.4': optional: true - '@img/sharp-libvips-linux-ppc64@1.1.0': + '@img/sharp-libvips-linux-ppc64@1.2.4': optional: true - '@img/sharp-libvips-linux-s390x@1.1.0': + '@img/sharp-libvips-linux-riscv64@1.2.4': optional: true - '@img/sharp-libvips-linux-x64@1.1.0': + '@img/sharp-libvips-linux-s390x@1.2.4': optional: true - '@img/sharp-libvips-linuxmusl-arm64@1.1.0': + '@img/sharp-libvips-linux-x64@1.2.4': optional: true - '@img/sharp-libvips-linuxmusl-x64@1.1.0': + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': optional: true - '@img/sharp-linux-arm64@0.34.2': + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + optional: true + + '@img/sharp-linux-arm64@0.34.5': optionalDependencies: - '@img/sharp-libvips-linux-arm64': 1.1.0 + '@img/sharp-libvips-linux-arm64': 1.2.4 optional: true - '@img/sharp-linux-arm@0.34.2': + '@img/sharp-linux-arm@0.34.5': optionalDependencies: - '@img/sharp-libvips-linux-arm': 1.1.0 + '@img/sharp-libvips-linux-arm': 1.2.4 optional: true - '@img/sharp-linux-s390x@0.34.2': + '@img/sharp-linux-ppc64@0.34.5': optionalDependencies: - '@img/sharp-libvips-linux-s390x': 1.1.0 + '@img/sharp-libvips-linux-ppc64': 1.2.4 optional: true - '@img/sharp-linux-x64@0.34.2': + '@img/sharp-linux-riscv64@0.34.5': optionalDependencies: - '@img/sharp-libvips-linux-x64': 1.1.0 + '@img/sharp-libvips-linux-riscv64': 1.2.4 optional: true - '@img/sharp-linuxmusl-arm64@0.34.2': + '@img/sharp-linux-s390x@0.34.5': optionalDependencies: - '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 + '@img/sharp-libvips-linux-s390x': 1.2.4 optional: true - '@img/sharp-linuxmusl-x64@0.34.2': + '@img/sharp-linux-x64@0.34.5': optionalDependencies: - '@img/sharp-libvips-linuxmusl-x64': 1.1.0 + '@img/sharp-libvips-linux-x64': 1.2.4 optional: true - '@img/sharp-wasm32@0.34.2': + '@img/sharp-linuxmusl-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.2.4 + optional: true + + '@img/sharp-linuxmusl-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.2.4 + optional: true + + '@img/sharp-wasm32@0.34.5': dependencies: - '@emnapi/runtime': 1.4.3 + '@emnapi/runtime': 1.7.0 optional: true - '@img/sharp-win32-arm64@0.34.2': + '@img/sharp-win32-arm64@0.34.5': optional: true - '@img/sharp-win32-ia32@0.34.2': + '@img/sharp-win32-ia32@0.34.5': optional: true - '@img/sharp-win32-x64@0.34.2': + '@img/sharp-win32-x64@0.34.5': optional: true '@isaacs/fs-minipass@4.0.1': @@ -2617,12 +2651,13 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 - '@mdx-js/mdx@3.1.0(acorn@8.15.0)': + '@mdx-js/mdx@3.1.1': dependencies: '@types/estree': 1.0.8 '@types/estree-jsx': 1.0.5 '@types/hast': 3.0.4 '@types/mdx': 2.0.13 + acorn: 8.15.0 collapse-white-space: 2.1.0 devlop: 1.1.0 estree-util-is-identifier-name: 3.0.0 @@ -2634,7 +2669,7 @@ snapshots: recma-jsx: 1.0.0(acorn@8.15.0) recma-stringify: 1.0.0 rehype-recma: 1.0.0 - remark-mdx: 3.1.0 + remark-mdx: 3.1.1 remark-parse: 11.0.0 remark-rehype: 11.1.2 source-map: 0.7.4 @@ -2644,433 +2679,439 @@ snapshots: 
unist-util-visit: 5.0.0 vfile: 6.0.3 transitivePeerDependencies: - - acorn - supports-color '@mermaid-js/parser@0.6.1': dependencies: langium: 3.3.1 - '@next/env@15.3.3': {} + '@next/env@16.0.1': {} - '@next/swc-darwin-arm64@15.3.3': + '@next/swc-darwin-arm64@16.0.1': optional: true - '@next/swc-darwin-x64@15.3.3': + '@next/swc-darwin-x64@16.0.1': optional: true - '@next/swc-linux-arm64-gnu@15.3.3': + '@next/swc-linux-arm64-gnu@16.0.1': optional: true - '@next/swc-linux-arm64-musl@15.3.3': + '@next/swc-linux-arm64-musl@16.0.1': optional: true - '@next/swc-linux-x64-gnu@15.3.3': + '@next/swc-linux-x64-gnu@16.0.1': optional: true - '@next/swc-linux-x64-musl@15.3.3': + '@next/swc-linux-x64-musl@16.0.1': optional: true - '@next/swc-win32-arm64-msvc@15.3.3': + '@next/swc-win32-arm64-msvc@16.0.1': optional: true - '@next/swc-win32-x64-msvc@15.3.3': + '@next/swc-win32-x64-msvc@16.0.1': optional: true - '@orama/orama@3.1.7': {} + '@orama/orama@3.1.16': {} '@posthog/core@1.3.0': {} '@radix-ui/number@1.1.1': {} - '@radix-ui/primitive@1.1.2': {} + '@radix-ui/primitive@1.1.3': {} - '@radix-ui/react-accordion@1.2.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-accordion@1.2.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collapsible': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-collapsible': 1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-arrow@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-arrow@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-collapsible@1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-collapsible@1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-collection@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-collection@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 
2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-compose-refs@1.1.2(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-compose-refs@1.1.2(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-context@1.1.2(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-context@1.1.2(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-dialog@1.1.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-dialog@1.1.15(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-focus-guards': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-focus-guards': 1.1.3(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-controllable-state': 
1.2.2(@types/react@19.1.8)(react@19.2.0) aria-hidden: 1.2.6 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) - react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-direction@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-direction@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-dismissable-layer@1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-dismissable-layer@1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-focus-guards@1.1.2(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-focus-guards@1.1.3(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-id@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-id@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-use-layout-effect': 
1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-navigation-menu@1.2.13(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-navigation-menu@1.2.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-previous': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-previous': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - 
'@radix-ui/react-popover@1.1.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-popover@1.1.15(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-focus-guards': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-popper': 1.2.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-focus-guards': 1.1.3(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-popper': 1.2.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) aria-hidden: 1.2.6 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) - react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-popper@1.2.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-popper@1.2.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@floating-ui/react-dom': 2.1.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-rect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-size': 1.1.1(@types/react@19.1.8)(react@19.1.0) + '@floating-ui/react-dom': 2.1.3(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-rect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-size': 1.1.1(@types/react@19.1.8)(react@19.2.0) '@radix-ui/rect': 1.1.1 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-portal@1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-portal@1.1.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-presence@1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-presence@1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-compose-refs': 
1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-primitive@2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-primitive@2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-roving-focus@1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-roving-focus@1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-scroll-area@1.2.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-scroll-area@1.2.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: '@radix-ui/number': 1.1.1 - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-direction': 
1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-slot@1.2.3(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-slot@1.2.3(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-tabs@1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-slot@1.2.4(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-roving-focus': 1.1.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + optionalDependencies: + '@types/react': 19.1.8 + + '@radix-ui/react-tabs@1.1.13(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': + dependencies: + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-context': 1.1.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-primitive': 
2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-roving-focus': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) - '@radix-ui/react-use-callback-ref@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-callback-ref@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-controllable-state@1.2.2(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-controllable-state@1.2.2(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-use-effect-event': 0.0.2(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-use-effect-event': 0.0.2(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-effect-event@0.0.2(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-effect-event@0.0.2(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-escape-keydown@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-escape-keydown@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-layout-effect@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-layout-effect@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-previous@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-previous@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-rect@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-rect@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: '@radix-ui/rect': 1.1.1 - react: 19.1.0 + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-use-size@1.1.1(@types/react@19.1.8)(react@19.1.0)': + '@radix-ui/react-use-size@1.1.1(@types/react@19.1.8)(react@19.2.0)': dependencies: - '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.1.0) - react: 19.1.0 + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.8)(react@19.2.0) + react: 19.2.0 optionalDependencies: '@types/react': 19.1.8 - '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) optionalDependencies: '@types/react': 19.1.8 '@types/react-dom': 19.1.6(@types/react@19.1.8) '@radix-ui/rect@1.1.1': {} - '@shikijs/core@3.7.0': + '@shikijs/core@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 hast-util-to-html: 9.0.5 - '@shikijs/engine-javascript@3.7.0': + '@shikijs/engine-javascript@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 oniguruma-to-es: 4.3.3 - '@shikijs/engine-oniguruma@3.7.0': + '@shikijs/engine-oniguruma@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 - '@shikijs/langs@3.7.0': + '@shikijs/langs@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 - '@shikijs/rehype@3.7.0': + '@shikijs/rehype@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 '@types/hast': 3.0.4 hast-util-to-string: 3.0.1 - shiki: 3.7.0 + shiki: 3.15.0 unified: 11.0.5 unist-util-visit: 5.0.0 - '@shikijs/themes@3.7.0': + '@shikijs/themes@3.15.0': dependencies: - '@shikijs/types': 3.7.0 + '@shikijs/types': 3.15.0 - '@shikijs/transformers@3.7.0': + '@shikijs/transformers@3.15.0': dependencies: - '@shikijs/core': 3.7.0 - '@shikijs/types': 3.7.0 + '@shikijs/core': 3.15.0 + '@shikijs/types': 3.15.0 - '@shikijs/types@3.7.0': + '@shikijs/types@3.15.0': dependencies: '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 @@ -3079,8 +3120,6 @@ snapshots: '@standard-schema/spec@1.0.0': {} - '@swc/counter@0.1.3': {} - '@swc/helpers@0.5.15': dependencies: tslib: 2.8.1 @@ -3325,10 +3364,6 @@ snapshots: acorn@8.15.0: {} - argparse@1.0.10: - dependencies: - sprintf-js: 1.0.3 - argparse@2.0.1: {} aria-hidden@1.2.6: @@ -3339,10 +3374,6 @@ snapshots: bail@2.0.2: {} - busboy@1.6.0: - dependencies: - streamsearch: 1.1.0 - caniuse-lite@1.0.30001724: {} ccount@2.0.1: {} @@ -3385,26 +3416,6 @@ snapshots: collapse-white-space@2.1.0: {} - color-convert@2.0.1: - dependencies: - color-name: 1.1.4 - optional: true - - color-name@1.1.4: - optional: true - - color-string@1.9.1: - dependencies: - color-name: 1.1.4 - simple-swizzle: 0.2.2 - optional: true - - color@4.2.3: - dependencies: - color-convert: 2.0.1 - color-string: 1.9.1 - optional: true - comma-separated-tokens@2.0.3: {} commander@7.2.0: {} @@ -3633,6 +3644,9 @@ snapshots: detect-libc@2.0.4: {} + detect-libc@2.1.2: + optional: true + detect-node-es@1.1.0: {} devlop@1.1.0: @@ -3662,38 +3676,37 @@ snapshots: esast-util-from-estree: 2.0.0 vfile-message: 4.0.2 - esbuild@0.25.5: + esbuild@0.25.12: optionalDependencies: - '@esbuild/aix-ppc64': 0.25.5 - '@esbuild/android-arm': 0.25.5 - '@esbuild/android-arm64': 0.25.5 - '@esbuild/android-x64': 0.25.5 - '@esbuild/darwin-arm64': 0.25.5 - '@esbuild/darwin-x64': 0.25.5 - '@esbuild/freebsd-arm64': 0.25.5 - '@esbuild/freebsd-x64': 0.25.5 - '@esbuild/linux-arm': 0.25.5 - '@esbuild/linux-arm64': 0.25.5 - 
'@esbuild/linux-ia32': 0.25.5 - '@esbuild/linux-loong64': 0.25.5 - '@esbuild/linux-mips64el': 0.25.5 - '@esbuild/linux-ppc64': 0.25.5 - '@esbuild/linux-riscv64': 0.25.5 - '@esbuild/linux-s390x': 0.25.5 - '@esbuild/linux-x64': 0.25.5 - '@esbuild/netbsd-arm64': 0.25.5 - '@esbuild/netbsd-x64': 0.25.5 - '@esbuild/openbsd-arm64': 0.25.5 - '@esbuild/openbsd-x64': 0.25.5 - '@esbuild/sunos-x64': 0.25.5 - '@esbuild/win32-arm64': 0.25.5 - '@esbuild/win32-ia32': 0.25.5 - '@esbuild/win32-x64': 0.25.5 + '@esbuild/aix-ppc64': 0.25.12 + '@esbuild/android-arm': 0.25.12 + '@esbuild/android-arm64': 0.25.12 + '@esbuild/android-x64': 0.25.12 + '@esbuild/darwin-arm64': 0.25.12 + '@esbuild/darwin-x64': 0.25.12 + '@esbuild/freebsd-arm64': 0.25.12 + '@esbuild/freebsd-x64': 0.25.12 + '@esbuild/linux-arm': 0.25.12 + '@esbuild/linux-arm64': 0.25.12 + '@esbuild/linux-ia32': 0.25.12 + '@esbuild/linux-loong64': 0.25.12 + '@esbuild/linux-mips64el': 0.25.12 + '@esbuild/linux-ppc64': 0.25.12 + '@esbuild/linux-riscv64': 0.25.12 + '@esbuild/linux-s390x': 0.25.12 + '@esbuild/linux-x64': 0.25.12 + '@esbuild/netbsd-arm64': 0.25.12 + '@esbuild/netbsd-x64': 0.25.12 + '@esbuild/openbsd-arm64': 0.25.12 + '@esbuild/openbsd-x64': 0.25.12 + '@esbuild/openharmony-arm64': 0.25.12 + '@esbuild/sunos-x64': 0.25.12 + '@esbuild/win32-arm64': 0.25.12 + '@esbuild/win32-ia32': 0.25.12 + '@esbuild/win32-x64': 0.25.12 escape-string-regexp@5.0.0: {} - esprima@4.0.1: {} - estree-util-attach-comments@3.0.0: dependencies: '@types/estree': 1.0.8 @@ -3718,7 +3731,7 @@ snapshots: astring: 1.9.0 source-map: 0.7.4 - estree-util-value-to-estree@3.4.0: + estree-util-value-to-estree@3.5.0: dependencies: '@types/estree': 1.0.8 @@ -3733,96 +3746,105 @@ snapshots: exsolve@1.0.7: {} - extend-shallow@2.0.1: - dependencies: - is-extendable: 0.1.1 - extend@3.0.2: {} - fdir@6.4.6(picomatch@4.0.2): + fdir@6.5.0(picomatch@4.0.3): optionalDependencies: - picomatch: 4.0.2 + picomatch: 4.0.3 fflate@0.4.8: {} - fumadocs-core@15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + fumadocs-core@16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: - '@formatjs/intl-localematcher': 0.6.1 - '@orama/orama': 3.1.7 - '@shikijs/rehype': 3.7.0 - '@shikijs/transformers': 3.7.0 + '@formatjs/intl-localematcher': 0.6.2 + '@orama/orama': 3.1.16 + '@shikijs/rehype': 3.15.0 + '@shikijs/transformers': 3.15.0 + estree-util-value-to-estree: 3.5.0 github-slugger: 2.0.0 hast-util-to-estree: 3.1.3 hast-util-to-jsx-runtime: 2.3.6 image-size: 2.0.2 negotiator: 1.0.0 - react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.1.0) + npm-to-yarn: 3.0.1 + path-to-regexp: 8.3.0 remark: 15.0.1 remark-gfm: 4.0.1 remark-rehype: 11.1.2 scroll-into-view-if-needed: 3.1.0 - shiki: 3.7.0 + shiki: 3.15.0 unist-util-visit: 5.0.0 optionalDependencies: - next: 15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + '@types/react': 19.1.8 + lucide-react: 0.525.0(react@19.2.0) + next: 16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) transitivePeerDependencies: - - '@types/react' - supports-color - 
fumadocs-mdx@11.6.7(acorn@8.15.0)(fumadocs-core@15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0)): + fumadocs-mdx@13.0.5(fumadocs-core@16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0): dependencies: - '@mdx-js/mdx': 3.1.0(acorn@8.15.0) + '@mdx-js/mdx': 3.1.1 '@standard-schema/spec': 1.0.0 chokidar: 4.0.3 - esbuild: 0.25.5 - estree-util-value-to-estree: 3.4.0 - fumadocs-core: 15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - gray-matter: 4.0.3 + esbuild: 0.25.12 + estree-util-value-to-estree: 3.5.0 + fumadocs-core: 16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0) js-yaml: 4.1.0 - lru-cache: 11.1.0 - next: 15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + lru-cache: 11.2.2 + mdast-util-to-markdown: 2.1.2 picocolors: 1.1.1 + picomatch: 4.0.3 + remark-mdx: 3.1.1 tinyexec: 1.0.1 - tinyglobby: 0.2.14 + tinyglobby: 0.2.15 + unified: 11.0.5 + unist-util-remove-position: 5.0.0 unist-util-visit: 5.0.0 - zod: 3.25.76 + zod: 4.1.12 + optionalDependencies: + next: 16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 transitivePeerDependencies: - - acorn - supports-color - fumadocs-ui@15.5.1(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.10): + fumadocs-ui@16.0.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(tailwindcss@4.1.10): dependencies: - '@radix-ui/react-accordion': 1.2.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-collapsible': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-dialog': 1.1.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-navigation-menu': 1.2.13(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-popover': 1.1.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-scroll-area': 1.2.9(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0) - '@radix-ui/react-tabs': 1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-accordion': 
1.2.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-collapsible': 1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-dialog': 1.1.15(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-navigation-menu': 1.2.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-popover': 1.1.15(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-scroll-area': 1.2.10(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@radix-ui/react-slot': 1.2.4(@types/react@19.1.8)(react@19.2.0) + '@radix-ui/react-tabs': 1.1.13(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) class-variance-authority: 0.7.1 - fumadocs-core: 15.5.1(@types/react@19.1.8)(next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + fumadocs-core: 16.0.8(@types/react@19.1.8)(lucide-react@0.525.0(react@19.2.0))(next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react-dom@19.2.0(react@19.2.0))(react@19.2.0) lodash.merge: 4.6.2 - next: 15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - next-themes: 0.4.6(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + next-themes: 0.4.6(react-dom@19.2.0(react@19.2.0))(react@19.2.0) postcss-selector-parser: 7.1.0 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) - react-medium-image-zoom: 5.2.14(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - react-remove-scroll: 2.7.1(@types/react@19.1.8)(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + react-medium-image-zoom: 5.4.0(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + scroll-into-view-if-needed: 3.1.0 tailwind-merge: 3.3.1 optionalDependencies: + '@types/react': 19.1.8 + next: 16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0) tailwindcss: 4.1.10 transitivePeerDependencies: - - '@oramacloud/client' - - '@types/react' + - '@mixedbread/sdk' + - '@orama/core' + - '@tanstack/react-router' - '@types/react-dom' - algoliasearch + - lucide-react + - react-router - supports-color + - waku get-nonce@1.0.1: {} @@ -3832,13 +3854,6 @@ snapshots: graceful-fs@4.2.11: {} - gray-matter@4.0.3: - dependencies: - js-yaml: 3.14.1 - kind-of: 6.0.3 - section-matter: 1.0.0 - strip-bom-string: 1.0.0 - hachure-fill@0.5.2: {} hast-util-to-estree@3.1.3: @@ -3925,24 +3940,14 @@ snapshots: is-alphabetical: 2.0.1 is-decimal: 2.0.1 - is-arrayish@0.3.2: - optional: true - is-decimal@2.0.1: {} - is-extendable@0.1.1: {} - is-hexadecimal@2.0.1: {} is-plain-obj@4.1.0: {} jiti@2.4.2: {} - js-yaml@3.14.1: - dependencies: - argparse: 1.0.10 - esprima: 4.0.1 - js-yaml@4.1.0: dependencies: argparse: 2.0.1 @@ -3953,8 +3958,6 @@ snapshots: khroma@2.1.0: {} - kind-of@6.0.3: {} - kolorist@1.8.0: {} langium@3.3.1: @@ -4026,11 +4029,11 @@ snapshots: longest-streak@3.1.0: {} - lru-cache@11.1.0: {} + lru-cache@11.2.2: {} - lucide-react@0.525.0(react@19.1.0): + 
lucide-react@0.525.0(react@19.2.0): dependencies: - react: 19.1.0 + react: 19.2.0 magic-string@0.30.17: dependencies: @@ -4515,36 +4518,36 @@ snapshots: negotiator@1.0.0: {} - next-themes@0.4.6(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + next-themes@0.4.6(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) - next@15.3.3(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + next@16.0.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: - '@next/env': 15.3.3 - '@swc/counter': 0.1.3 + '@next/env': 16.0.1 '@swc/helpers': 0.5.15 - busboy: 1.6.0 caniuse-lite: 1.0.30001724 postcss: 8.4.31 - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) - styled-jsx: 5.1.6(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + styled-jsx: 5.1.6(react@19.2.0) optionalDependencies: - '@next/swc-darwin-arm64': 15.3.3 - '@next/swc-darwin-x64': 15.3.3 - '@next/swc-linux-arm64-gnu': 15.3.3 - '@next/swc-linux-arm64-musl': 15.3.3 - '@next/swc-linux-x64-gnu': 15.3.3 - '@next/swc-linux-x64-musl': 15.3.3 - '@next/swc-win32-arm64-msvc': 15.3.3 - '@next/swc-win32-x64-msvc': 15.3.3 - sharp: 0.34.2 + '@next/swc-darwin-arm64': 16.0.1 + '@next/swc-darwin-x64': 16.0.1 + '@next/swc-linux-arm64-gnu': 16.0.1 + '@next/swc-linux-arm64-musl': 16.0.1 + '@next/swc-linux-x64-gnu': 16.0.1 + '@next/swc-linux-x64-musl': 16.0.1 + '@next/swc-win32-arm64-msvc': 16.0.1 + '@next/swc-win32-x64-msvc': 16.0.1 + sharp: 0.34.5 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros + npm-to-yarn@3.0.1: {} + oniguruma-parser@0.12.1: {} oniguruma-to-es@4.3.3: @@ -4567,11 +4570,13 @@ snapshots: path-data-parser@0.1.0: {} + path-to-regexp@8.3.0: {} + pathe@2.0.3: {} picocolors@1.1.1: {} - picomatch@4.0.2: {} + picomatch@4.0.3: {} pkg-types@1.3.1: dependencies: @@ -4625,48 +4630,48 @@ snapshots: quansync@0.2.10: {} - react-dom@19.1.0(react@19.1.0): + react-dom@19.2.0(react@19.2.0): dependencies: - react: 19.1.0 - scheduler: 0.26.0 + react: 19.2.0 + scheduler: 0.27.0 - react-icons@5.5.0(react@19.1.0): + react-icons@5.5.0(react@19.2.0): dependencies: - react: 19.1.0 + react: 19.2.0 - react-medium-image-zoom@5.2.14(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + react-medium-image-zoom@5.4.0(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: - react: 19.1.0 - react-dom: 19.1.0(react@19.1.0) + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) - react-remove-scroll-bar@2.3.8(@types/react@19.1.8)(react@19.1.0): + react-remove-scroll-bar@2.3.8(@types/react@19.1.8)(react@19.2.0): dependencies: - react: 19.1.0 - react-style-singleton: 2.2.3(@types/react@19.1.8)(react@19.1.0) + react: 19.2.0 + react-style-singleton: 2.2.3(@types/react@19.1.8)(react@19.2.0) tslib: 2.8.1 optionalDependencies: '@types/react': 19.1.8 - react-remove-scroll@2.7.1(@types/react@19.1.8)(react@19.1.0): + react-remove-scroll@2.7.1(@types/react@19.1.8)(react@19.2.0): dependencies: - react: 19.1.0 - react-remove-scroll-bar: 2.3.8(@types/react@19.1.8)(react@19.1.0) - react-style-singleton: 2.2.3(@types/react@19.1.8)(react@19.1.0) + react: 19.2.0 + react-remove-scroll-bar: 2.3.8(@types/react@19.1.8)(react@19.2.0) + react-style-singleton: 2.2.3(@types/react@19.1.8)(react@19.2.0) tslib: 2.8.1 - use-callback-ref: 1.3.3(@types/react@19.1.8)(react@19.1.0) - use-sidecar: 1.1.3(@types/react@19.1.8)(react@19.1.0) + use-callback-ref: 1.3.3(@types/react@19.1.8)(react@19.2.0) + use-sidecar: 1.1.3(@types/react@19.1.8)(react@19.2.0) optionalDependencies: 
'@types/react': 19.1.8 - react-style-singleton@2.2.3(@types/react@19.1.8)(react@19.1.0): + react-style-singleton@2.2.3(@types/react@19.1.8)(react@19.2.0): dependencies: get-nonce: 1.0.1 - react: 19.1.0 + react: 19.2.0 tslib: 2.8.1 optionalDependencies: '@types/react': 19.1.8 - react@19.1.0: {} + react@19.2.0: {} readdirp@4.1.2: {} @@ -4736,6 +4741,13 @@ snapshots: transitivePeerDependencies: - supports-color + remark-mdx@3.1.1: + dependencies: + mdast-util-mdx: 3.0.0 + micromark-extension-mdxjs: 3.0.0 + transitivePeerDependencies: + - supports-color + remark-parse@11.0.0: dependencies: '@types/mdast': 4.0.4 @@ -4781,82 +4793,69 @@ snapshots: safer-buffer@2.1.2: {} - scheduler@0.26.0: {} + scheduler@0.27.0: {} scroll-into-view-if-needed@3.1.0: dependencies: compute-scroll-into-view: 3.1.1 - section-matter@1.0.0: - dependencies: - extend-shallow: 2.0.1 - kind-of: 6.0.3 - - semver@7.7.2: + semver@7.7.3: optional: true - sharp@0.34.2: + sharp@0.34.5: dependencies: - color: 4.2.3 - detect-libc: 2.0.4 - semver: 7.7.2 + '@img/colour': 1.0.0 + detect-libc: 2.1.2 + semver: 7.7.3 optionalDependencies: - '@img/sharp-darwin-arm64': 0.34.2 - '@img/sharp-darwin-x64': 0.34.2 - '@img/sharp-libvips-darwin-arm64': 1.1.0 - '@img/sharp-libvips-darwin-x64': 1.1.0 - '@img/sharp-libvips-linux-arm': 1.1.0 - '@img/sharp-libvips-linux-arm64': 1.1.0 - '@img/sharp-libvips-linux-ppc64': 1.1.0 - '@img/sharp-libvips-linux-s390x': 1.1.0 - '@img/sharp-libvips-linux-x64': 1.1.0 - '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 - '@img/sharp-libvips-linuxmusl-x64': 1.1.0 - '@img/sharp-linux-arm': 0.34.2 - '@img/sharp-linux-arm64': 0.34.2 - '@img/sharp-linux-s390x': 0.34.2 - '@img/sharp-linux-x64': 0.34.2 - '@img/sharp-linuxmusl-arm64': 0.34.2 - '@img/sharp-linuxmusl-x64': 0.34.2 - '@img/sharp-wasm32': 0.34.2 - '@img/sharp-win32-arm64': 0.34.2 - '@img/sharp-win32-ia32': 0.34.2 - '@img/sharp-win32-x64': 0.34.2 + '@img/sharp-darwin-arm64': 0.34.5 + '@img/sharp-darwin-x64': 0.34.5 + '@img/sharp-libvips-darwin-arm64': 1.2.4 + '@img/sharp-libvips-darwin-x64': 1.2.4 + '@img/sharp-libvips-linux-arm': 1.2.4 + '@img/sharp-libvips-linux-arm64': 1.2.4 + '@img/sharp-libvips-linux-ppc64': 1.2.4 + '@img/sharp-libvips-linux-riscv64': 1.2.4 + '@img/sharp-libvips-linux-s390x': 1.2.4 + '@img/sharp-libvips-linux-x64': 1.2.4 + '@img/sharp-libvips-linuxmusl-arm64': 1.2.4 + '@img/sharp-libvips-linuxmusl-x64': 1.2.4 + '@img/sharp-linux-arm': 0.34.5 + '@img/sharp-linux-arm64': 0.34.5 + '@img/sharp-linux-ppc64': 0.34.5 + '@img/sharp-linux-riscv64': 0.34.5 + '@img/sharp-linux-s390x': 0.34.5 + '@img/sharp-linux-x64': 0.34.5 + '@img/sharp-linuxmusl-arm64': 0.34.5 + '@img/sharp-linuxmusl-x64': 0.34.5 + '@img/sharp-wasm32': 0.34.5 + '@img/sharp-win32-arm64': 0.34.5 + '@img/sharp-win32-ia32': 0.34.5 + '@img/sharp-win32-x64': 0.34.5 optional: true - shiki@3.7.0: + shiki@3.15.0: dependencies: - '@shikijs/core': 3.7.0 - '@shikijs/engine-javascript': 3.7.0 - '@shikijs/engine-oniguruma': 3.7.0 - '@shikijs/langs': 3.7.0 - '@shikijs/themes': 3.7.0 - '@shikijs/types': 3.7.0 + '@shikijs/core': 3.15.0 + '@shikijs/engine-javascript': 3.15.0 + '@shikijs/engine-oniguruma': 3.15.0 + '@shikijs/langs': 3.15.0 + '@shikijs/themes': 3.15.0 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 - simple-swizzle@0.2.2: - dependencies: - is-arrayish: 0.3.2 - optional: true - source-map-js@1.2.1: {} source-map@0.7.4: {} space-separated-tokens@2.0.2: {} - sprintf-js@1.0.3: {} - - streamsearch@1.1.0: {} - stringify-entities@4.0.4: dependencies: 
character-entities-html4: 2.1.0 character-entities-legacy: 3.0.0 - strip-bom-string@1.0.0: {} - style-to-js@1.1.17: dependencies: style-to-object: 1.0.9 @@ -4865,10 +4864,10 @@ snapshots: dependencies: inline-style-parser: 0.2.4 - styled-jsx@5.1.6(react@19.1.0): + styled-jsx@5.1.6(react@19.2.0): dependencies: client-only: 0.0.1 - react: 19.1.0 + react: 19.2.0 stylis@4.3.6: {} @@ -4889,10 +4888,10 @@ snapshots: tinyexec@1.0.1: {} - tinyglobby@0.2.14: + tinyglobby@0.2.15: dependencies: - fdir: 6.4.6(picomatch@4.0.2) - picomatch: 4.0.2 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 trim-lines@3.0.1: {} @@ -4930,6 +4929,11 @@ snapshots: dependencies: '@types/unist': 3.0.3 + unist-util-remove-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-visit: 5.0.0 + unist-util-stringify-position@4.0.0: dependencies: '@types/unist': 3.0.3 @@ -4945,17 +4949,17 @@ snapshots: unist-util-is: 6.0.0 unist-util-visit-parents: 6.0.1 - use-callback-ref@1.3.3(@types/react@19.1.8)(react@19.1.0): + use-callback-ref@1.3.3(@types/react@19.1.8)(react@19.2.0): dependencies: - react: 19.1.0 + react: 19.2.0 tslib: 2.8.1 optionalDependencies: '@types/react': 19.1.8 - use-sidecar@1.1.3(@types/react@19.1.8)(react@19.1.0): + use-sidecar@1.1.3(@types/react@19.1.8)(react@19.2.0): dependencies: detect-node-es: 1.1.0 - react: 19.1.0 + react: 19.2.0 tslib: 2.8.1 optionalDependencies: '@types/react': 19.1.8 @@ -4995,6 +4999,6 @@ snapshots: yallist@5.0.0: {} - zod@3.25.76: {} + zod@4.1.12: {} zwitch@2.0.4: {} diff --git a/docs/public/img/grounding-with-gemini3.gif b/docs/public/img/grounding-with-gemini3.gif new file mode 100644 index 00000000..57404ba0 Binary files /dev/null and b/docs/public/img/grounding-with-gemini3.gif differ diff --git a/docs/public/img/laminar_trace_example.png b/docs/public/img/laminar_trace_example.png new file mode 100644 index 00000000..75c3139d Binary files /dev/null and b/docs/public/img/laminar_trace_example.png differ diff --git a/docs/source.config.ts b/docs/source.config.ts index dd019e8f..d915345e 100644 --- a/docs/source.config.ts +++ b/docs/source.config.ts @@ -6,12 +6,12 @@ import { z } from 'zod'; export const docs = defineDocs({ docs: { schema: frontmatterSchema.extend({ + macos: z.boolean().optional(), + windows: z.boolean().optional(), + linux: z.boolean().optional(), pypi: z.string().optional(), npm: z.string().optional(), github: z.array(z.string()).optional(), - macos: z.boolean().default(false), - windows: z.boolean().default(false), - linux: z.boolean().default(false), }), }, meta: { diff --git a/docs/src/app/(home)/[[...slug]]/page.tsx b/docs/src/app/(home)/[[...slug]]/page.tsx index 60840ece..e5f279a0 100644 --- a/docs/src/app/(home)/[[...slug]]/page.tsx +++ b/docs/src/app/(home)/[[...slug]]/page.tsx @@ -8,15 +8,16 @@ import { cn } from 'fumadocs-ui/utils/cn'; import { ChevronDown, CodeXml, ExternalLink } from 'lucide-react'; import type { Metadata } from 'next'; import Link from 'next/link'; -import { notFound, redirect } from 'next/navigation'; +import { notFound } from 'next/navigation'; import { PageFeedback } from '@/components/page-feedback'; import { DocActionsMenu } from '@/components/doc-actions-menu'; export default async function Page(props: { params: Promise<{ slug?: string[] }> }) { const params = await props.params; const slug = params.slug || []; + const page = source.getPage(slug); - if (!page) notFound(); //redirect('/docs'); + if (!page) notFound(); // Detect if this is an API reference page: /api/[section] or /api/[section]/[version] let 
apiSection: string | null = null; @@ -179,9 +180,13 @@ export default async function Page(props: { params: Promise<{ slug?: string[] }> }; const tocFooter = () => { + // Construct file path from slug + // For root index, use 'index.mdx', otherwise join slug parts + const filePath = slug.length === 0 ? 'index.mdx' : `${slug.join('/')}.mdx`; + return (
- +
); }; @@ -282,9 +287,9 @@ export async function generateMetadata(props: { const page = source.getPage(params.slug); if (!page) notFound(); - let title = `${page.data.title} | Cua Docs`; - if (page.url.includes('api')) title = `${page.data.title} | Cua API Docs`; - if (page.url.includes('guide')) title = ` Guide: ${page.data.title} | Cua Docs`; + let title = `${page.data.title} | Cua`; + if (page.url.includes('api')) title = `${page.data.title} | Cua API`; + if (page.url.includes('guide')) title = ` Guide: ${page.data.title} | Cua`; // Canonical URL points to cua.ai to consolidate all SEO authority on main domain const canonicalUrl = `https://cua.ai${page.url}`; @@ -368,7 +373,7 @@ export async function generateMetadata(props: { title, description: page.data.description, type: 'article', - siteName: 'Cua Docs', + siteName: 'Cua', url: canonicalUrl, }, twitter: { diff --git a/docs/src/app/global.css b/docs/src/app/global.css index 50b3bc29..65ae0960 100644 --- a/docs/src/app/global.css +++ b/docs/src/app/global.css @@ -1,3 +1,14 @@ @import 'tailwindcss'; @import 'fumadocs-ui/css/neutral.css'; @import 'fumadocs-ui/css/preset.css'; + +/* Fix TOC overflow on production builds */ +#nd-toc { + overflow-y: auto; + overflow-x: hidden; +} + +#nd-toc > div { + overflow-y: auto; + overflow-x: hidden; +} diff --git a/docs/src/app/layout.config.tsx b/docs/src/app/layout.config.tsx index 87c652f1..f47250c5 100644 --- a/docs/src/app/layout.config.tsx +++ b/docs/src/app/layout.config.tsx @@ -34,9 +34,10 @@ export const baseOptions: BaseLayoutProps = { className="hidden dark:block" alt="Logo" /> - Cua Documentation + Cua ), + url: 'https://cua.ai', }, githubUrl: 'https://github.com/trycua/cua', links: [ diff --git a/docs/src/components/doc-actions-menu.tsx b/docs/src/components/doc-actions-menu.tsx index 779e703c..067627ca 100644 --- a/docs/src/components/doc-actions-menu.tsx +++ b/docs/src/components/doc-actions-menu.tsx @@ -7,7 +7,7 @@ import posthog from 'posthog-js'; interface DocActionsMenuProps { pageUrl: string; pageTitle: string; - filePath: string; + filePath?: string; } export function DocActionsMenu({ pageUrl, pageTitle, filePath }: DocActionsMenuProps) { @@ -15,6 +15,9 @@ export function DocActionsMenu({ pageUrl, pageTitle, filePath }: DocActionsMenuP const handleCopyMarkdown = async () => { try { + if (!filePath) { + throw new Error('No file path available'); + } const githubRawUrl = `https://raw.githubusercontent.com/trycua/cua/refs/heads/main/docs/content/docs/${filePath}`; const response = await fetch(githubRawUrl); @@ -55,6 +58,9 @@ export function DocActionsMenu({ pageUrl, pageTitle, filePath }: DocActionsMenuP }; const handleEditGithub = () => { + if (!filePath) { + return; + } posthog.capture('docs_edit_github_clicked', { page: pageUrl, page_title: pageTitle, diff --git a/docs/src/components/footer.tsx b/docs/src/components/footer.tsx index b129eebe..45a7e2d8 100644 --- a/docs/src/components/footer.tsx +++ b/docs/src/components/footer.tsx @@ -56,7 +56,7 @@ export function Footer() {
  • Quick Start diff --git a/docs/src/components/hero.tsx b/docs/src/components/hero.tsx new file mode 100644 index 00000000..8f4695c8 --- /dev/null +++ b/docs/src/components/hero.tsx @@ -0,0 +1,34 @@ +export function Hero({ children }: { children: React.ReactNode }) { + return ( +
    + {/* Background Pattern */} +
    + {/* Grid */} + + + + + + + + + + {/* Subtle glow effects */} +
    +
    +
    + + {/* Content */} +
    {children}
    +
    + ); +} diff --git a/docs/src/lib/llms.ts b/docs/src/lib/llms.ts index e485bed2..56b0f5b1 100644 --- a/docs/src/lib/llms.ts +++ b/docs/src/lib/llms.ts @@ -12,15 +12,24 @@ const processor = remark() .use(remarkGfm); export async function getLLMText(page: InferPageType) { - const processed = await processor.process({ - path: page.data._file.absolutePath, - value: page.data.content, - }); + const pageData = page.data as any; + const filePath = pageData._file?.absolutePath; + const content = pageData.content || pageData.body || ''; + + let processed; + if (filePath && typeof content === 'string') { + processed = await processor.process({ path: filePath, value: content }); + } else if (typeof content === 'string') { + processed = await processor.process(content); + } else { + // Handle case where content is not available + processed = { value: '' }; + } return `# ${page.data.title} URL: ${page.url} -${page.data.description} +${page.data.description || ''} ${processed.value}`; } diff --git a/docs/src/mdx-components.tsx b/docs/src/mdx-components.tsx index 4e73fd57..35f77a33 100644 --- a/docs/src/mdx-components.tsx +++ b/docs/src/mdx-components.tsx @@ -9,6 +9,7 @@ import { EditableForm, EditableInput, } from './components/editable-code-block'; +import { Hero } from './components/hero'; // use this function to get MDX components, you will need it for rendering MDX export function getMDXComponents(components?: MDXComponents): MDXComponents { @@ -20,6 +21,7 @@ export function getMDXComponents(components?: MDXComponents): MDXComponents { EditableValue, EditableForm, EditableInput, + Hero, ...TabsComponents, ...components, }; diff --git a/docs/src/providers/posthog-provider.tsx b/docs/src/providers/posthog-provider.tsx index eb9b1d9c..ca62a214 100644 --- a/docs/src/providers/posthog-provider.tsx +++ b/docs/src/providers/posthog-provider.tsx @@ -6,13 +6,19 @@ import { useEffect } from 'react'; import { usePathname, useSearchParams } from 'next/navigation'; if (typeof window !== 'undefined') { - posthog.init(process.env.NEXT_PUBLIC_POSTHOG_API_KEY!, { - api_host: '/docs/api/posthog', - ui_host: process.env.NEXT_PUBLIC_POSTHOG_HOST, - person_profiles: 'always', - capture_pageview: false, - capture_pageleave: true, - }); + const apiKey = process.env.NEXT_PUBLIC_POSTHOG_API_KEY; + + if (apiKey) { + posthog.init(apiKey, { + api_host: '/docs/api/posthog', + ui_host: process.env.NEXT_PUBLIC_POSTHOG_HOST, + person_profiles: 'always', + capture_pageview: false, + capture_pageleave: true, + }); + } else { + console.warn('[PostHog] API key not configured. 
Analytics will be disabled.'); + } } export function PHProvider({ children }: { children: React.ReactNode }) { diff --git a/docs/tsconfig.json b/docs/tsconfig.json index 8730cf88..34346f5f 100644 --- a/docs/tsconfig.json +++ b/docs/tsconfig.json @@ -13,7 +13,7 @@ "moduleResolution": "bundler", "resolveJsonModule": true, "isolatedModules": true, - "jsx": "preserve", + "jsx": "react-jsx", "incremental": true, "paths": { "@/.source": ["./.source/index.ts"], @@ -25,6 +25,12 @@ } ] }, - "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "include": [ + "next-env.d.ts", + "**/*.ts", + "**/*.tsx", + ".next/types/**/*.ts", + ".next/dev/types/**/*.ts" + ], "exclude": ["node_modules"] } diff --git a/examples/agent_examples.py b/examples/agent_examples.py index 42fa5c7f..bb3ca6e5 100644 --- a/examples/agent_examples.py +++ b/examples/agent_examples.py @@ -45,7 +45,7 @@ async def run_agent_example(): # model="anthropic/claude-opus-4-20250514", # model="anthropic/claude-sonnet-4-20250514", # model="anthropic/claude-3-7-sonnet-20250219", - # model="anthropic/claude-3-5-sonnet-20241022", + # model="anthropic/claude-sonnet-4-5-20250929", # == UI-TARS == # model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", # model="mlx/mlx-community/UI-TARS-1.5-7B-6bit", @@ -53,6 +53,10 @@ async def run_agent_example(): # == Omniparser + Any LLM == # model="omniparser+anthropic/claude-opus-4-20250514", # model="omniparser+ollama_chat/gemma3:12b-it-q4_K_M", + # == Omniparser + Vertex AI Gemini 3 (with thinking_level) == + # model="omni+vertex_ai/gemini-3-flash", + # thinking_level="high", # or "low" + # media_resolution="medium", # or "low" or "high" tools=[computer], only_n_most_recent_images=3, verbosity=logging.DEBUG, diff --git a/examples/cloud_api_examples.py b/examples/cloud_api_examples.py index dd7d95ee..3444aa30 100644 --- a/examples/cloud_api_examples.py +++ b/examples/cloud_api_examples.py @@ -9,14 +9,13 @@ from computer.providers.cloud.provider import CloudProvider async def main() -> None: - api_key = os.getenv("CUA_API_KEY") - if not api_key: - raise RuntimeError("CUA_API_KEY environment variable is not set") + # CloudProvider will automatically read CUA_API_KEY from environment if not provided + # You can still pass api_key explicitly if needed: CloudProvider(api_key="your_key") api_base = os.getenv("CUA_API_BASE") if api_base: print(f"Using API base: {api_base}") - provider = CloudProvider(api_key=api_key, verbose=True) + provider = CloudProvider(verbose=True) async with provider: # List all VMs diff --git a/examples/computer-example-ts/README.md b/examples/computer-example-ts/README.md index 7e7fc81e..b83838ce 100644 --- a/examples/computer-example-ts/README.md +++ b/examples/computer-example-ts/README.md @@ -34,14 +34,6 @@ This example demonstrates how to control a Cua Cloud Sandbox using the OpenAI `c - `src/index.ts` β€” Main example script - `src/helpers.ts` β€” Helper for executing actions on the container -## Further Reading - -For a step-by-step tutorial and more detailed explanation, see the accompanying blog post: - -➑️ [Controlling a Cua Cloud Sandbox with JavaScript](https://placeholder-url-to-blog-post.com) - -_(This link will be updated once the article is published.)_ - --- If you have questions or issues, please open an issue or contact the maintainers. 
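For context on the `examples/cloud_api_examples.py` change above, here is a minimal usage sketch. It assumes only the `CloudProvider` surface that example shows (the constructor's `api_key`/`verbose` arguments and the async context manager); the constructor now falls back to the `CUA_API_KEY` environment variable when no key is passed, and anything beyond that is left as a placeholder rather than guessed.

```python
# Minimal sketch, assuming only the CloudProvider surface shown in
# examples/cloud_api_examples.py above: the constructor falls back to the
# CUA_API_KEY environment variable when api_key is omitted.
import asyncio

from computer.providers.cloud.provider import CloudProvider


async def main() -> None:
    # Reads CUA_API_KEY from the environment automatically.
    provider = CloudProvider(verbose=True)

    # Passing the key explicitly still works if needed:
    # provider = CloudProvider(api_key="your_key", verbose=True)

    async with provider:
        # List or manage VMs here, as in the full example.
        ...


if __name__ == "__main__":
    asyncio.run(main())
```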
diff --git a/libs/lume/README.md b/libs/lume/README.md index 6d1c12a7..0d287b04 100644 --- a/libs/lume/README.md +++ b/libs/lume/README.md @@ -58,7 +58,7 @@ To get set up with Lume for development, read [these instructions](Development.m - [Installation](https://cua.ai/docs/libraries/lume/installation) - [Prebuilt Images](https://cua.ai/docs/libraries/lume/prebuilt-images) - [CLI Reference](https://cua.ai/docs/libraries/lume/cli-reference) -- [HTTP API](https://cuai.ai/docs/libraries/lume/http-api) +- [HTTP API](https://cua.ai/docs/libraries/lume/http-api) - [FAQ](https://cua.ai/docs/libraries/lume/faq) ## Contributing diff --git a/libs/python/agent/.bumpversion.cfg b/libs/python/agent/.bumpversion.cfg index ab6acb97..eec12a57 100644 --- a/libs/python/agent/.bumpversion.cfg +++ b/libs/python/agent/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.4.37 +current_version = 0.5.1 commit = True tag = True tag_name = agent-v{new_version} diff --git a/libs/python/agent/README.md b/libs/python/agent/README.md index 1c4b1cc5..40b901a3 100644 --- a/libs/python/agent/README.md +++ b/libs/python/agent/README.md @@ -51,7 +51,7 @@ async def main(): # Create agent agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", + model="anthropic/claude-sonnet-4-5-20250929", tools=[computer], only_n_most_recent_images=3, trajectory_dir="trajectories", @@ -78,7 +78,7 @@ if __name__ == "__main__": - [Chat History](https://cua.ai/docs/agent-sdk/chat-history) - [Callbacks](https://cua.ai/docs/agent-sdk/callbacks) - [Custom Tools](https://cua.ai/docs/agent-sdk/custom-tools) -- [Custom Computer Handlers](https://cua.ai/docs/agent-sdk/custom-computer-handlers) +- [Custom Computer Handlers](https://cua.ai/docs/computer-sdk/custom-computer-handlers) - [Prompt Caching](https://cua.ai/docs/agent-sdk/prompt-caching) - [Usage Tracking](https://cua.ai/docs/agent-sdk/usage-tracking) - [Benchmarks](https://cua.ai/docs/agent-sdk/benchmarks) diff --git a/libs/python/agent/agent/adapters/__init__.py b/libs/python/agent/agent/adapters/__init__.py index 1f07a9fc..cded1d34 100644 --- a/libs/python/agent/agent/adapters/__init__.py +++ b/libs/python/agent/agent/adapters/__init__.py @@ -2,6 +2,7 @@ Adapters package for agent - Custom LLM adapters for LiteLLM """ +from .cua_adapter import CUAAdapter from .huggingfacelocal_adapter import HuggingFaceLocalAdapter from .human_adapter import HumanAdapter from .mlxvlm_adapter import MLXVLMAdapter @@ -10,4 +11,5 @@ __all__ = [ "HuggingFaceLocalAdapter", "HumanAdapter", "MLXVLMAdapter", + "CUAAdapter", ] diff --git a/libs/python/agent/agent/adapters/cua_adapter.py b/libs/python/agent/agent/adapters/cua_adapter.py new file mode 100644 index 00000000..76ecb7e4 --- /dev/null +++ b/libs/python/agent/agent/adapters/cua_adapter.py @@ -0,0 +1,145 @@ +import os +from typing import Any, AsyncIterator, Iterator + +from litellm import acompletion, completion +from litellm.llms.custom_llm import CustomLLM +from litellm.types.utils import GenericStreamingChunk, ModelResponse + + +class CUAAdapter(CustomLLM): + def __init__(self, base_url: str | None = None, api_key: str | None = None, **_: Any): + super().__init__() + self.base_url = base_url or os.environ.get("CUA_BASE_URL") or "https://inference.cua.ai/v1" + self.api_key = ( + api_key or os.environ.get("CUA_INFERENCE_API_KEY") or os.environ.get("CUA_API_KEY") + ) + + def _normalize_model(self, model: str) -> str: + # Accept either "cua/" or raw "" + return model.split("/", 1)[1] if model and model.startswith("cua/") else 
model + + def completion(self, *args, **kwargs) -> ModelResponse: + model = kwargs.get("model", "") + api_base = kwargs.get("api_base") or self.base_url + if "anthropic/" in model: + model = f"anthropic/{self._normalize_model(model)}" + api_base = api_base.removesuffix("/v1") + else: + model = f"openai/{self._normalize_model(model)}" + + params = { + "model": model, + "messages": kwargs.get("messages", []), + "api_base": api_base, + "api_key": kwargs.get("api_key") or self.api_key, + "stream": False, + } + + if "optional_params" in kwargs: + params.update(kwargs["optional_params"]) + del kwargs["optional_params"] + + if "headers" in kwargs: + params["headers"] = kwargs["headers"] + del kwargs["headers"] + + # Print dropped parameters + original_keys = set(kwargs.keys()) + used_keys = set(params.keys()) # Only these are extracted from kwargs + ignored_keys = { + "litellm_params", + "client", + "print_verbose", + "acompletion", + "timeout", + "logging_obj", + "encoding", + "custom_prompt_dict", + "model_response", + "logger_fn", + } + dropped_keys = original_keys - used_keys - ignored_keys + if dropped_keys: + dropped_keyvals = {k: kwargs[k] for k in dropped_keys} + # print(f"CUAAdapter.completion: Dropped parameters: {dropped_keyvals}") + + return completion(**params) # type: ignore + + async def acompletion(self, *args, **kwargs) -> ModelResponse: + model = kwargs.get("model", "") + api_base = kwargs.get("api_base") or self.base_url + if "anthropic/" in model: + model = f"anthropic/{self._normalize_model(model)}" + api_base = api_base.removesuffix("/v1") + else: + model = f"openai/{self._normalize_model(model)}" + + params = { + "model": model, + "messages": kwargs.get("messages", []), + "api_base": api_base, + "api_key": kwargs.get("api_key") or self.api_key, + "stream": False, + } + + if "optional_params" in kwargs: + params.update(kwargs["optional_params"]) + del kwargs["optional_params"] + + if "headers" in kwargs: + params["headers"] = kwargs["headers"] + del kwargs["headers"] + + # Print dropped parameters + original_keys = set(kwargs.keys()) + used_keys = set(params.keys()) # Only these are extracted from kwargs + ignored_keys = { + "litellm_params", + "client", + "print_verbose", + "acompletion", + "timeout", + "logging_obj", + "encoding", + "custom_prompt_dict", + "model_response", + "logger_fn", + } + dropped_keys = original_keys - used_keys - ignored_keys + if dropped_keys: + dropped_keyvals = {k: kwargs[k] for k in dropped_keys} + # print(f"CUAAdapter.acompletion: Dropped parameters: {dropped_keyvals}") + + response = await acompletion(**params) # type: ignore + + return response + + def streaming(self, *args, **kwargs) -> Iterator[GenericStreamingChunk]: + params = dict(kwargs) + inner_model = self._normalize_model(params.get("model", "")) + params.update( + { + "model": f"openai/{inner_model}", + "api_base": self.base_url, + "api_key": self.api_key, + "stream": True, + } + ) + # Yield chunks directly from LiteLLM's streaming generator + for chunk in completion(**params): # type: ignore + yield chunk # type: ignore + + async def astreaming(self, *args, **kwargs) -> AsyncIterator[GenericStreamingChunk]: + params = dict(kwargs) + inner_model = self._normalize_model(params.get("model", "")) + params.update( + { + "model": f"openai/{inner_model}", + "api_base": self.base_url, + "api_key": self.api_key, + "stream": True, + } + ) + stream = await acompletion(**params) # type: ignore + async for chunk in stream: # type: ignore + yield chunk # type: ignore diff --git 
a/libs/python/agent/agent/agent.py b/libs/python/agent/agent/agent.py index f85c513c..47e0bae3 100644 --- a/libs/python/agent/agent/agent.py +++ b/libs/python/agent/agent/agent.py @@ -23,11 +23,7 @@ import litellm import litellm.utils from litellm.responses.utils import Usage -from .adapters import ( - HuggingFaceLocalAdapter, - HumanAdapter, - MLXVLMAdapter, -) +from .adapters import CUAAdapter, HuggingFaceLocalAdapter, HumanAdapter, MLXVLMAdapter from .callbacks import ( BudgetManagerCallback, ImageRetentionCallback, @@ -193,7 +189,7 @@ class ComputerAgent: Initialize ComputerAgent. Args: - model: Model name (e.g., "claude-3-5-sonnet-20241022", "computer-use-preview", "omni+vertex_ai/gemini-pro") + model: Model name (e.g., "claude-sonnet-4-5-20250929", "computer-use-preview", "omni+vertex_ai/gemini-pro") tools: List of tools (computer objects, decorated functions, etc.) custom_loop: Custom agent loop function to use instead of auto-selection only_n_most_recent_images: If set, only keep the N most recent images in message history. Adds ImageRetentionCallback automatically. @@ -241,13 +237,6 @@ class ComputerAgent: if self.instructions: self.callbacks.append(PromptInstructionsCallback(self.instructions)) - # Add telemetry callback if telemetry_enabled is set - if self.telemetry_enabled: - if isinstance(self.telemetry_enabled, bool): - self.callbacks.append(TelemetryCallback(self)) - else: - self.callbacks.append(TelemetryCallback(self, **self.telemetry_enabled)) - # Add logging callback if verbosity is set if self.verbosity is not None: self.callbacks.append(LoggingCallback(level=self.verbosity)) @@ -278,10 +267,12 @@ class ComputerAgent: ) human_adapter = HumanAdapter() mlx_adapter = MLXVLMAdapter() + cua_adapter = CUAAdapter() litellm.custom_provider_map = [ {"provider": "huggingface-local", "custom_handler": hf_adapter}, {"provider": "human", "custom_handler": human_adapter}, {"provider": "mlx", "custom_handler": mlx_adapter}, + {"provider": "cua", "custom_handler": cua_adapter}, ] litellm.suppress_debug_info = True @@ -299,6 +290,13 @@ class ComputerAgent: self.agent_loop = config_info.agent_class() self.agent_config_info = config_info + # Add telemetry callback AFTER agent_loop is set so it can capture the correct agent_type + if self.telemetry_enabled: + if isinstance(self.telemetry_enabled, bool): + self.callbacks.append(TelemetryCallback(self)) + else: + self.callbacks.append(TelemetryCallback(self, **self.telemetry_enabled)) + self.tool_schemas = [] self.computer_handler = None diff --git a/libs/python/agent/agent/callbacks/telemetry.py b/libs/python/agent/agent/callbacks/telemetry.py index d8e77e1d..dc86ca74 100644 --- a/libs/python/agent/agent/callbacks/telemetry.py +++ b/libs/python/agent/agent/callbacks/telemetry.py @@ -60,11 +60,14 @@ class TelemetryCallback(AsyncCallbackHandler): def _record_agent_initialization(self) -> None: """Record agent type/model and session initialization.""" + # Get the agent loop type (class name) + agent_type = "unknown" + if hasattr(self.agent, "agent_loop") and self.agent.agent_loop is not None: + agent_type = type(self.agent.agent_loop).__name__ + agent_info = { "session_id": self.session_id, - "agent_type": ( - self.agent.agent_loop.__name__ if hasattr(self.agent, "agent_loop") else "unknown" - ), + "agent_type": agent_type, "model": getattr(self.agent, "model", "unknown"), **SYSTEM_INFO, } diff --git a/libs/python/agent/agent/cli.py b/libs/python/agent/agent/cli.py index dcd4544b..970214ce 100644 --- a/libs/python/agent/agent/cli.py +++ 
b/libs/python/agent/agent/cli.py @@ -6,8 +6,8 @@ Usage: Examples: python -m agent.cli openai/computer-use-preview - python -m agent.cli anthropic/claude-3-5-sonnet-20241022 - python -m agent.cli omniparser+anthropic/claude-3-5-sonnet-20241022 + python -m agent.cli anthropic/claude-sonnet-4-5-20250929 + python -m agent.cli omniparser+anthropic/claude-sonnet-4-5-20250929 """ try: @@ -232,15 +232,15 @@ async def main(): epilog=""" Examples: python -m agent.cli openai/computer-use-preview - python -m agent.cli anthropic/claude-3-5-sonnet-20241022 - python -m agent.cli omniparser+anthropic/claude-3-5-sonnet-20241022 + python -m agent.cli anthropic/claude-sonnet-4-5-20250929 + python -m agent.cli omniparser+anthropic/claude-sonnet-4-5-20250929 python -m agent.cli huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B """, ) parser.add_argument( "model", - help="Model string (e.g., 'openai/computer-use-preview', 'anthropic/claude-3-5-sonnet-20241022')", + help="Model string (e.g., 'openai/computer-use-preview', 'anthropic/claude-sonnet-4-5-20250929')", ) parser.add_argument( diff --git a/libs/python/agent/agent/loops/__init__.py b/libs/python/agent/agent/loops/__init__.py index ab23ac27..f6d3dc59 100644 --- a/libs/python/agent/agent/loops/__init__.py +++ b/libs/python/agent/agent/loops/__init__.py @@ -1,36 +1,42 @@ -""" -Agent loops for agent -""" - -# Import the loops to register them -from . import ( - anthropic, - composed_grounded, - gemini, - glm45v, - gta1, - holo, - internvl, - moondream3, - omniparser, - openai, - opencua, - qwen, - uitars, -) - -__all__ = [ - "anthropic", - "openai", - "uitars", - "omniparser", - "gta1", - "composed_grounded", - "glm45v", - "opencua", - "internvl", - "holo", - "moondream3", - "gemini", - "qwen", -] +""" +Agent loops for agent +""" + +# Import the loops to register them +from . 
import ( + anthropic, + composed_grounded, + gelato, + gemini, + glm45v, + gta1, + holo, + internvl, + moondream3, + omniparser, + openai, + opencua, + generic_vlm, + uiins, + uitars, + uitars2, +) + +__all__ = [ + "anthropic", + "openai", + "uitars", + "omniparser", + "gta1", + "composed_grounded", + "glm45v", + "opencua", + "internvl", + "holo", + "moondream3", + "gemini", + "generic_vlm", + "uiins", + "gelato", + "uitars2", +] diff --git a/libs/python/agent/agent/loops/anthropic.py b/libs/python/agent/agent/loops/anthropic.py index 2a3dffd5..0fa08b96 100644 --- a/libs/python/agent/agent/loops/anthropic.py +++ b/libs/python/agent/agent/loops/anthropic.py @@ -107,12 +107,9 @@ async def _prepare_tools_for_anthropic(tool_schemas: List[Dict[str, Any]], model function_schema = schema["function"] anthropic_tools.append( { - "type": "function", - "function": { - "name": function_schema["name"], - "description": function_schema.get("description", ""), - "parameters": function_schema.get("parameters", {}), - }, + "name": function_schema["name"], + "description": function_schema.get("description", ""), + "input_schema": function_schema.get("parameters", {}), } ) @@ -666,11 +663,25 @@ def _convert_completion_to_responses_items(response: Any) -> List[Dict[str, Any] if content_item.get("type") == "text": responses_items.append(make_output_text_item(content_item.get("text", ""))) elif content_item.get("type") == "tool_use": - # Convert tool use to computer call + # Check if this is a custom function tool or computer tool + tool_name = content_item.get("name", "computer") tool_input = content_item.get("input", {}) - action_type = tool_input.get("action") call_id = content_item.get("id") + # Handle custom function tools (not computer tools) + if tool_name != "computer": + from ..responses import make_function_call_item + + responses_items.append( + make_function_call_item( + function_name=tool_name, arguments=tool_input, call_id=call_id + ) + ) + continue + + # Computer tool - process actions + action_type = tool_input.get("action") + # Action reference: # https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/computer-use-tool#available-actions @@ -868,6 +879,25 @@ def _convert_completion_to_responses_items(response: Any) -> List[Dict[str, Any] # Handle tool calls (alternative format) if hasattr(message, "tool_calls") and message.tool_calls: for tool_call in message.tool_calls: + tool_name = tool_call.function.name + + # Handle custom function tools + if tool_name != "computer": + from ..responses import make_function_call_item + + # tool_call.function.arguments is a JSON string, need to parse it + try: + args_dict = json.loads(tool_call.function.arguments) + except json.JSONDecodeError: + args_dict = {} + responses_items.append( + make_function_call_item( + function_name=tool_name, arguments=args_dict, call_id=tool_call.id + ) + ) + continue + + # Handle computer tool if tool_call.function.name == "computer": try: try: diff --git a/libs/python/agent/agent/loops/gelato.py b/libs/python/agent/agent/loops/gelato.py new file mode 100644 index 00000000..e3032472 --- /dev/null +++ b/libs/python/agent/agent/loops/gelato.py @@ -0,0 +1,183 @@ +""" +Gelato agent loop implementation for click prediction using litellm.acompletion +Model: https://huggingface.co/mlfoundations/Gelato-30B-A3B +Code: https://github.com/mlfoundations/Gelato/tree/main +""" + +import base64 +import math +import re +from io import BytesIO +from typing import Any, Dict, List, Optional, Tuple + +import litellm +from PIL import Image + 
+from ..decorators import register_agent +from ..loops.base import AsyncAgentConfig +from ..types import AgentCapability + +SYSTEM_PROMPT = """ +You are an expert UI element locator. Given a GUI image and a user's element description, provide the coordinates of the specified element as a single (x,y) point. For elements with area, return the center point. + +Output the coordinate pair exactly: +(x,y) +""" + + +def extract_coordinates(raw_string): + """ + Extract the coordinates from the raw string. + Args: + raw_string: str (e.g. "(100, 200)") + Returns: + x: float (e.g. 100.0) + y: float (e.g. 200.0) + """ + try: + matches = re.findall(r"\((-?\d*\.?\d+),\s*(-?\d*\.?\d+)\)", raw_string) + return [tuple(map(int, match)) for match in matches][0] + except: + return 0, 0 + + +def smart_resize( + height: int, + width: int, + factor: int = 28, + min_pixels: int = 3136, + max_pixels: int = 8847360, +) -> Tuple[int, int]: + """Smart resize function similar to qwen_vl_utils.""" + # Calculate the total pixels + total_pixels = height * width + + # If already within bounds, return original dimensions + if min_pixels <= total_pixels <= max_pixels: + # Round to nearest factor + new_height = (height // factor) * factor + new_width = (width // factor) * factor + return new_height, new_width + + # Calculate scaling factor + if total_pixels > max_pixels: + scale = (max_pixels / total_pixels) ** 0.5 + else: + scale = (min_pixels / total_pixels) ** 0.5 + + # Apply scaling + new_height = int(height * scale) + new_width = int(width * scale) + + # Round to nearest factor + new_height = (new_height // factor) * factor + new_width = (new_width // factor) * factor + + # Ensure minimum size + new_height = max(new_height, factor) + new_width = max(new_width, factor) + + return new_height, new_width + + +@register_agent(models=r".*Gelato.*") +class GelatoConfig(AsyncAgentConfig): + """Gelato agent configuration implementing AsyncAgentConfig protocol for click prediction.""" + + def __init__(self): + self.current_model = None + self.last_screenshot_b64 = None + + async def predict_step( + self, + messages: List[Dict[str, Any]], + model: str, + tools: Optional[List[Dict[str, Any]]] = None, + max_retries: Optional[int] = None, + stream: bool = False, + computer_handler=None, + _on_api_start=None, + _on_api_end=None, + _on_usage=None, + _on_screenshot=None, + **kwargs, + ) -> Dict[str, Any]: + raise NotImplementedError() + + async def predict_click( + self, model: str, image_b64: str, instruction: str, **kwargs + ) -> Optional[Tuple[float, float]]: + """ + Predict click coordinates using UI-Ins model via litellm.acompletion. 
+ + Args: + model: The UI-Ins model name + image_b64: Base64 encoded image + instruction: Instruction for where to click + + Returns: + Tuple of (x, y) coordinates or None if prediction fails + """ + # Decode base64 image + image_data = base64.b64decode(image_b64) + image = Image.open(BytesIO(image_data)) + width, height = image.width, image.height + + # Smart resize the image (similar to qwen_vl_utils) + resized_height, resized_width = smart_resize( + height, + width, + factor=28, # Default factor for Qwen models + min_pixels=3136, + max_pixels=4096 * 2160, + ) + resized_image = image.resize((resized_width, resized_height)) + scale_x, scale_y = width / resized_width, height / resized_height + + # Convert resized image back to base64 + buffered = BytesIO() + resized_image.save(buffered, format="PNG") + resized_image_b64 = base64.b64encode(buffered.getvalue()).decode() + + # Prepare system and user messages + system_message = { + "role": "system", + "content": [{"type": "text", "text": SYSTEM_PROMPT.strip()}], + } + + user_message = { + "role": "user", + "content": [ + { + "type": "image_url", + "image_url": {"url": f"data:image/png;base64,{resized_image_b64}"}, + }, + {"type": "text", "text": instruction}, + ], + } + + # Prepare API call kwargs + api_kwargs = { + "model": model, + "messages": [system_message, user_message], + "max_tokens": 2056, + "temperature": 0.0, + **kwargs, + } + + # Use liteLLM acompletion + response = await litellm.acompletion(**api_kwargs) + + # Extract response text + output_text = response.choices[0].message.content # type: ignore + + # Extract and rescale coordinates + pred_x, pred_y = extract_coordinates(output_text) # type: ignore + pred_x *= scale_x + pred_y *= scale_y + + return (math.floor(pred_x), math.floor(pred_y)) + + def get_capabilities(self) -> List[AgentCapability]: + """Return the capabilities supported by this agent.""" + return ["click"] diff --git a/libs/python/agent/agent/loops/qwen.py b/libs/python/agent/agent/loops/generic_vlm.py similarity index 88% rename from libs/python/agent/agent/loops/qwen.py rename to libs/python/agent/agent/loops/generic_vlm.py index f21fba2c..4696234b 100644 --- a/libs/python/agent/agent/loops/qwen.py +++ b/libs/python/agent/agent/loops/generic_vlm.py @@ -20,6 +20,7 @@ from ..loops.base import AsyncAgentConfig from ..responses import ( convert_completion_messages_to_responses_items, convert_responses_items_to_completion_messages, + make_reasoning_item, ) from ..types import AgentCapability @@ -233,8 +234,8 @@ def convert_qwen_tool_args_to_computer_action(args: Dict[str, Any]) -> Optional[ return None -@register_agent(models=r"(?i).*qwen.*", priority=-1) -class Qwen3VlConfig(AsyncAgentConfig): +@register_agent(models=r"(?i).*", priority=-100) +class GenericVlmConfig(AsyncAgentConfig): async def predict_step( self, messages: List[Dict[str, Any]], @@ -373,13 +374,23 @@ class Qwen3VlConfig(AsyncAgentConfig): if _on_usage: await _on_usage(usage) - # Parse tool call from text; then convert to responses items via fake tool_calls + # Extract response data resp_dict = response.model_dump() # type: ignore choice = (resp_dict.get("choices") or [{}])[0] - content_text = ((choice.get("message") or {}).get("content")) or "" - tool_call = _parse_tool_call_from_text(content_text) + message = choice.get("message") or {} + content_text = message.get("content") or "" + tool_calls_array = message.get("tool_calls") or [] + reasoning_text = message.get("reasoning") or "" output_items: List[Dict[str, Any]] = [] + + # Add reasoning if 
present (Ollama Cloud format) + if reasoning_text: + output_items.append(make_reasoning_item(reasoning_text)) + + # Priority 1: Try to parse tool call from content text (OpenRouter format) + tool_call = _parse_tool_call_from_text(content_text) + if tool_call and isinstance(tool_call, dict): fn_name = tool_call.get("name") or "computer" raw_args = tool_call.get("arguments") or {} @@ -405,8 +416,50 @@ class Qwen3VlConfig(AsyncAgentConfig): ], } output_items.extend(convert_completion_messages_to_responses_items([fake_cm])) + elif tool_calls_array: + # Priority 2: Use tool_calls field if present (Ollama Cloud format) + # Process and unnormalize coordinates in tool calls + processed_tool_calls = [] + for tc in tool_calls_array: + function = tc.get("function", {}) + fn_name = function.get("name", "computer") + args_str = function.get("arguments", "{}") + + try: + args = json.loads(args_str) + + # Unnormalize coordinates if present + if "coordinate" in args and last_rw is not None and last_rh is not None: + args = await _unnormalize_coordinate(args, (last_rw, last_rh)) + + # Convert Qwen format to Computer Calls format if this is a computer tool + if fn_name == "computer": + converted_action = convert_qwen_tool_args_to_computer_action(args) + if converted_action: + args = converted_action + + processed_tool_calls.append( + { + "type": tc.get("type", "function"), + "id": tc.get("id", "call_0"), + "function": { + "name": fn_name, + "arguments": json.dumps(args), + }, + } + ) + except json.JSONDecodeError: + # Keep original if parsing fails + processed_tool_calls.append(tc) + + fake_cm = { + "role": "assistant", + "content": content_text if content_text else "", + "tool_calls": processed_tool_calls, + } + output_items.extend(convert_completion_messages_to_responses_items([fake_cm])) else: - # Fallback: just return assistant text + # No tool calls found in either format, return text response fake_cm = {"role": "assistant", "content": content_text} output_items.extend(convert_completion_messages_to_responses_items([fake_cm])) diff --git a/libs/python/agent/agent/loops/omniparser.py b/libs/python/agent/agent/loops/omniparser.py index e15dfc5b..f671dce2 100644 --- a/libs/python/agent/agent/loops/omniparser.py +++ b/libs/python/agent/agent/loops/omniparser.py @@ -365,6 +365,22 @@ class OmniparserConfig(AsyncAgentConfig): **kwargs, } + # Add Vertex AI specific parameters if using vertex_ai models + if llm_model.startswith("vertex_ai/"): + import os + + # Pass vertex_project and vertex_location to liteLLM + if "vertex_project" not in api_kwargs: + api_kwargs["vertex_project"] = os.getenv("GOOGLE_CLOUD_PROJECT") + if "vertex_location" not in api_kwargs: + api_kwargs["vertex_location"] = "global" + + # Pass through Gemini 3-specific parameters if provided + if "thinking_level" in kwargs: + api_kwargs["thinking_level"] = kwargs["thinking_level"] + if "media_resolution" in kwargs: + api_kwargs["media_resolution"] = kwargs["media_resolution"] + # Call API start hook if _on_api_start: await _on_api_start(api_kwargs) diff --git a/libs/python/agent/agent/loops/uiins.py b/libs/python/agent/agent/loops/uiins.py new file mode 100644 index 00000000..10956948 --- /dev/null +++ b/libs/python/agent/agent/loops/uiins.py @@ -0,0 +1,175 @@ +""" +UI-Ins agent loop implementation for click prediction using litellm.acompletion +Paper: https://arxiv.org/pdf/2510.202861 +Code: https://github.com/alibaba/UI-Ins +""" + +import asyncio +import base64 +import json +import math +import re +import uuid +from io import BytesIO +from 
typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union + +import litellm +from PIL import Image + +from ..decorators import register_agent +from ..loops.base import AsyncAgentConfig +from ..types import AgentCapability, AgentResponse, Messages, Tools + +SYSTEM_PROMPT = """You are a GUI agent. You are given a task and your action history, with screenshots. You need to perform the next action to complete the task.\n\n## Output Format\nReturn a json object with a reasoning process in tags, a function name and arguments within XML tags:\n```\n\n...\n\n\n{"name": "grounding", "arguments": }\n\n```\n represents the following item of the action space:\n## Action Space{"action": "click", "coordinate": [x, y]}\nYour task is to accurately locate a UI element based on the instruction. You should first analyze instruction in tags and finally output the function in tags.\n""" + + +def parse_coordinates(raw_string: str) -> tuple[int, int]: + matches = re.findall(r"\[(\d+),\s*(\d+)\]", raw_string) + if matches: + return tuple(map(int, matches[0])) + return -1, -1 + + +def smart_resize( + height: int, + width: int, + factor: int = 28, + min_pixels: int = 3136, + max_pixels: int = 8847360, +) -> Tuple[int, int]: + """Smart resize function similar to qwen_vl_utils.""" + # Calculate the total pixels + total_pixels = height * width + + # If already within bounds, return original dimensions + if min_pixels <= total_pixels <= max_pixels: + # Round to nearest factor + new_height = (height // factor) * factor + new_width = (width // factor) * factor + return new_height, new_width + + # Calculate scaling factor + if total_pixels > max_pixels: + scale = (max_pixels / total_pixels) ** 0.5 + else: + scale = (min_pixels / total_pixels) ** 0.5 + + # Apply scaling + new_height = int(height * scale) + new_width = int(width * scale) + + # Round to nearest factor + new_height = (new_height // factor) * factor + new_width = (new_width // factor) * factor + + # Ensure minimum size + new_height = max(new_height, factor) + new_width = max(new_width, factor) + + return new_height, new_width + + +@register_agent(models=r".*UI-Ins.*") +class UIInsConfig(AsyncAgentConfig): + """UI-Ins agent configuration implementing AsyncAgentConfig protocol for click prediction.""" + + def __init__(self): + self.current_model = None + self.last_screenshot_b64 = None + + async def predict_step( + self, + messages: List[Dict[str, Any]], + model: str, + tools: Optional[List[Dict[str, Any]]] = None, + max_retries: Optional[int] = None, + stream: bool = False, + computer_handler=None, + _on_api_start=None, + _on_api_end=None, + _on_usage=None, + _on_screenshot=None, + **kwargs, + ) -> Dict[str, Any]: + raise NotImplementedError() + + async def predict_click( + self, model: str, image_b64: str, instruction: str, **kwargs + ) -> Optional[Tuple[float, float]]: + """ + Predict click coordinates using UI-Ins model via litellm.acompletion. 
+ + Args: + model: The UI-Ins model name + image_b64: Base64 encoded image + instruction: Instruction for where to click + + Returns: + Tuple of (x, y) coordinates or None if prediction fails + """ + # Decode base64 image + image_data = base64.b64decode(image_b64) + image = Image.open(BytesIO(image_data)) + width, height = image.width, image.height + + # Smart resize the image (similar to qwen_vl_utils) + resized_height, resized_width = smart_resize( + height, + width, + factor=28, # Default factor for Qwen models + min_pixels=3136, + max_pixels=4096 * 2160, + ) + resized_image = image.resize((resized_width, resized_height)) + scale_x, scale_y = width / resized_width, height / resized_height + + # Convert resized image back to base64 + buffered = BytesIO() + resized_image.save(buffered, format="PNG") + resized_image_b64 = base64.b64encode(buffered.getvalue()).decode() + + # Prepare system and user messages + system_message = { + "role": "system", + "content": [ + {"type": "text", "text": "You are a helpful assistant."}, + {"type": "text", "text": SYSTEM_PROMPT}, + ], + } + + user_message = { + "role": "user", + "content": [ + { + "type": "image_url", + "image_url": {"url": f"data:image/png;base64,{resized_image_b64}"}, + }, + {"type": "text", "text": instruction}, + ], + } + + # Prepare API call kwargs + api_kwargs = { + "model": model, + "messages": [system_message, user_message], + "max_tokens": 2056, + "temperature": 0.0, + **kwargs, + } + + # Use liteLLM acompletion + response = await litellm.acompletion(**api_kwargs) + + # Extract response text + output_text = response.choices[0].message.content # type: ignore + + # Extract and rescale coordinates + pred_x, pred_y = parse_coordinates(output_text) # type: ignore + pred_x *= scale_x + pred_y *= scale_y + + return (math.floor(pred_x), math.floor(pred_y)) + + def get_capabilities(self) -> List[AgentCapability]: + """Return the capabilities supported by this agent.""" + return ["click"] diff --git a/libs/python/agent/agent/loops/uitars.py b/libs/python/agent/agent/loops/uitars.py index 5d532a41..9b359990 100644 --- a/libs/python/agent/agent/loops/uitars.py +++ b/libs/python/agent/agent/loops/uitars.py @@ -563,7 +563,7 @@ def convert_uitars_messages_to_litellm(messages: Messages) -> List[Dict[str, Any return litellm_messages -@register_agent(models=r"(?i).*ui-?tars.*") +@register_agent(models=r"(?i).*ui-?tars.*", priority=-1) class UITARSConfig: """ UITARS agent configuration using liteLLM for ByteDance-Seed/UI-TARS-1.5-7B model. diff --git a/libs/python/agent/agent/loops/uitars2.py b/libs/python/agent/agent/loops/uitars2.py new file mode 100644 index 00000000..4ecb3b04 --- /dev/null +++ b/libs/python/agent/agent/loops/uitars2.py @@ -0,0 +1,951 @@ +""" +UITARS-2 agent loop implementation using LiteLLM. +- Prepends a system prompt modeled after the training prompts in examples/seed_16_gui.ipynb +- Converts Responses items -> completion messages +- Calls litellm.acompletion +- Parses ... 
outputs back into Responses items (computer actions) +""" + +from __future__ import annotations + +import base64 +import io +import json +import re +from typing import Any, Dict, List, Optional, Tuple + +import litellm +from litellm.responses.litellm_completion_transformation.transformation import ( + LiteLLMCompletionResponsesConfig, +) + +from ..decorators import register_agent +from .omniparser import get_last_computer_call_output # type: ignore + +try: + from PIL import Image # type: ignore +except Exception: # pragma: no cover + Image = None # type: ignore +from ..responses import ( + convert_responses_items_to_completion_messages, + make_click_item, + make_double_click_item, + make_drag_item, + make_function_call_item, + make_keypress_item, + make_move_item, + make_output_text_item, + make_reasoning_item, + make_screenshot_item, + make_scroll_item, + make_type_item, + make_wait_item, +) +from ..types import AgentCapability + +TOOL_SCHEMAS: List[Dict[str, Any]] = [ + { + "type": "function", + "name": "open_computer", + "parameters": {}, + "description": "Open computer.", + }, + { + "type": "function", + "name": "click", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Click coordinates. The format is: x y", + } + }, + "required": ["point"], + }, + "description": "Mouse left single click action.", + }, + { + "type": "function", + "name": "left_double", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Click coordinates. The format is: x y", + } + }, + "required": ["point"], + }, + "description": "Mouse left double click action.", + }, + { + "type": "function", + "name": "right_single", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Click coordinates. The format is: x y", + } + }, + "required": ["point"], + }, + "description": "Mouse right single click action.", + }, + { + "type": "function", + "name": "scroll", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Scroll start position. If not specified, default to execute on the current mouse position. The format is: x y", + }, + "direction": { + "type": "string", + "description": "Scroll direction.", + "enum": ["up", "down", "left", "right"], + }, + }, + "required": ["direction"], + }, + "description": "Scroll action.", + }, + { + "type": "function", + "name": "move_to", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Target coordinates. The format is: x y", + } + }, + "required": ["point"], + }, + "description": "Mouse move action.", + }, + { + "type": "function", + "name": "hotkey", + "parameters": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Hotkeys you want to press. 
Split keys with a space and use lowercase.", + } + }, + "required": ["key"], + }, + "description": "Press hotkey.", + }, + { + "type": "function", + "name": "finished", + "parameters": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "Provide the final answer or response to complete the task.", + } + }, + "required": [], + }, + "description": "This function is used to indicate the completion of a task by providing the final answer or response.", + }, + { + "type": "function", + "name": "press", + "parameters": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key you want to press. Only one key can be pressed at one time.", + } + }, + "required": ["key"], + }, + "description": "Press key.", + }, + { + "type": "function", + "name": "release", + "parameters": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key you want to release. Only one key can be released at one time.", + } + }, + "required": ["key"], + }, + "description": "Release key.", + }, + { + "type": "function", + "name": "mouse_down", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Mouse down position. If not specified, default to execute on the current mouse position. The format is: x y", + }, + "button": { + "type": "string", + "description": "Down button. Default to left.", + "enum": ["left", "right"], + }, + }, + "required": [], + }, + "description": "Mouse down action.", + }, + { + "type": "function", + "name": "mouse_up", + "parameters": { + "type": "object", + "properties": { + "point": { + "type": "string", + "description": "Mouse up position. If not specified, default to execute on the current mouse position. The format is: x y", + }, + "button": { + "type": "string", + "description": "Up button. Default to left.", + "enum": ["left", "right"], + }, + }, + "required": [], + }, + "description": "Mouse up action.", + }, + { + "type": "function", + "name": "call_user", + "parameters": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "Message or information displayed to the user to request their input, feedback, or guidance.", + } + }, + "required": [], + }, + "description": "This function is used to interact with the user by displaying a message and requesting their input, feedback, or guidance.", + }, + { + "type": "function", + "name": "wait", + "parameters": { + "type": "object", + "properties": {"time": {"type": "integer", "description": "Wait time in seconds."}}, + "required": [], + }, + "description": "Wait for a while.", + }, + { + "type": "function", + "name": "drag", + "parameters": { + "type": "object", + "properties": { + "start_point": { + "type": "string", + "description": "Drag start point. The format is: x y", + }, + "end_point": { + "type": "string", + "description": "Drag end point. The format is: x y", + }, + }, + "required": ["start_point", "end_point"], + }, + "description": "Mouse left button drag action.", + }, + { + "type": "function", + "name": "type", + "parameters": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "Type content. 
If you want to submit your input, use \\n at the end of content.", + } + }, + "required": ["content"], + }, + "description": "Type content.", + }, + { + "type": "function", + "name": "take_screenshot", + "parameters": {}, + "description": "Take screenshot.", + }, +] + + +def _format_tool_schemas_json_lines(schemas: List[Dict[str, Any]]) -> str: + # Nicely formatted: pretty JSON with indentation, separated by blank lines + return "\n\n".join(json.dumps(s, ensure_ascii=False, indent=2) for s in schemas) + "\n\n" + + +_PROMPT_PREFIX = ( + "You should begin by detailing the internal reasoning process, and then present the answer to the user. " + "The reasoning process should be enclosed within " + " tags, as follows:\n" + " reasoning process here " + " answer here.\n\n" + "You have different modes of thinking:\n" + "Unrestricted think mode: Engage in an internal thinking process with thorough reasoning and reflections. " + "You have an unlimited budget for thinking tokens and can continue thinking until you fully solve the problem.\n" + "Efficient think mode: Provide a concise internal thinking process with efficient reasoning and reflections. " + "You don't have a strict token budget but be less verbose and more direct in your thinking.\n" + "No think mode: Respond directly to the question without any internal reasoning process or extra thinking tokens. " + "Still follow the template with the minimum required thinking tokens to justify the answer.\n" + "Budgeted think mode: Limit your internal reasoning and reflections to stay within the specified token budget\n\n" + "Based on the complexity of the problem, select the appropriate mode for reasoning among the provided options listed below.\n\n" + "Provided Mode(s):\nEfficient think.\n\n" + "You are provided with a task description, a history of previous actions, and corresponding screenshots. " + "Your goal is to perform the next action to complete the task. " + "If performing the same action multiple times results in a static screen with no changes, attempt a modified or alternative action.\n\n" + "## Function Definition\n\n" + "- You have access to the following functions:\n\n" +) + +_PROMPT_SUFFIX = ( + "- To call a function, use the following structure without any suffix:\n\n" + " reasoning process \n" + "value_1" + "multiline...\n\n\n" + "## Important Notes\n" + "- Function calls must begin with .\n" + "- All required parameters must be explicitly provided.\n" + "\n## Additional Notes\n" + "- You can execute multiple actions within a single tool call. For example:\n" + "value_1\n" + "This is the value for the second parameter\nthat can span\nmultiple lines\n" + "value_4" +) + + +SYSTEM_PROMPT = _PROMPT_PREFIX + _format_tool_schemas_json_lines(TOOL_SCHEMAS) + _PROMPT_SUFFIX + + +def _extract_function_schemas_from_tools( + tools: Optional[List[Dict[str, Any]]], +) -> List[Dict[str, Any]]: + schemas: List[Dict[str, Any]] = [] + if not tools: + return schemas + for t in tools: + if t.get("type") == "function": + fn = t.get("function", {}) + name = fn.get("name") + params = fn.get("parameters", {}) + desc = fn.get("description", "") + if name: + schemas.append( + { + "type": "function", + "name": name, + "parameters": params if isinstance(params, dict) else {}, + "description": desc, + } + ) + return schemas + + +def _parse_seed_tool_calls(text: str) -> List[Dict[str, Any]]: + """Parse blocks into a list of {function, parameters} dicts. + Also captures optional ... as reasoning. 
+ """ + actions: List[Dict[str, Any]] = [] + if not text: + return actions + + # Extract reasoning if present + reasoning_text = None + think_match = re.search(r"([\s\S]*?)", text) + if think_match: + reasoning_text = think_match.group(1).strip() + + # Iterate each seed tool_call block + for block in re.finditer(r"([\s\S]*?)", text): + content = block.group(1) + # One or multiple ... inside + for fmatch in re.finditer(r"([\s\S]*?)", content): + fname = fmatch.group(1) + inner = fmatch.group(2) + params: Dict[str, str] = {} + for pmatch in re.finditer(r"([\s\S]*?)", inner): + pname = pmatch.group(1) + pval = pmatch.group(2).strip() + params[pname] = pval + actions.append({"function": fname, "parameters": params}) + + # If we have a global reasoning and at least one action, attach it to first + if reasoning_text and actions: + actions[0]["reasoning"] = reasoning_text + elif reasoning_text: + actions.append({"function": "reasoning", "parameters": {"content": reasoning_text}}) + + return actions + + +def _normalize_xy_to_uitars(x: int, y: int, width: int, height: int) -> Tuple[int, int]: + width = max(1, int(width)) + height = max(1, int(height)) + nx = max(0, min(1000, int(round((x / width) * 1000)))) + ny = max(0, min(1000, int(round((y / height) * 1000)))) + return nx, ny + + +def _denormalize_xy_from_uitars(nx: float, ny: float, width: int, height: int) -> Tuple[int, int]: + width = max(1, int(width)) + height = max(1, int(height)) + x = int(round((nx / 1000.0) * width)) + y = int(round((ny / 1000.0) * height)) + return x, y + + +def _map_computer_action_to_function( + action: Dict[str, Any], width: int, height: int +) -> Optional[Dict[str, Any]]: + """Map a computer action item to a UITARS function + parameters dict of strings. + Returns dict like {"function": name, "parameters": {..}} or None if unknown. 
+ """ + atype = action.get("type") or action.get("action") + if atype == "click": + x, y = action.get("x"), action.get("y") + btn = action.get("button", "left") + if x is None or y is None: + return None + nx, ny = _normalize_xy_to_uitars(int(x), int(y), width, height) + if btn == "right": + return { + "function": "right_single", + "parameters": {"point": f"{nx} {ny}"}, + } + return {"function": "click", "parameters": {"point": f"{nx} {ny}"}} + if atype == "double_click": + x, y = action.get("x"), action.get("y") + if x is None or y is None: + return None + nx, ny = _normalize_xy_to_uitars(int(x), int(y), width, height) + return {"function": "left_double", "parameters": {"point": f"{nx} {ny}"}} + if atype == "move": + x, y = action.get("x"), action.get("y") + if x is None or y is None: + return None + nx, ny = _normalize_xy_to_uitars(int(x), int(y), width, height) + return {"function": "move_to", "parameters": {"point": f"{nx} {ny}"}} + if atype == "keypress": + keys = action.get("keys", []) + if isinstance(keys, list) and keys: + if len(keys) == 1: + return {"function": "press", "parameters": {"key": keys[0]}} + else: + return {"function": "hotkey", "parameters": {"key": " ".join(keys)}} + return None + if atype == "type": + text = action.get("text", "") + return {"function": "type", "parameters": {"content": text}} + if atype == "scroll": + x, y = action.get("x", 512), action.get("y", 512) + nx, ny = _normalize_xy_to_uitars(int(x), int(y), width, height) + sx, sy = action.get("scroll_x", 0), action.get("scroll_y", 0) + # Our parser used positive sy for up + direction = ( + "up" + if sy and sy > 0 + else ( + "down" + if sy and sy < 0 + else ("right" if sx and sx > 0 else ("left" if sx and sx < 0 else "down")) + ) + ) + return { + "function": "scroll", + "parameters": {"direction": direction, "point": f"{nx} {ny}"}, + } + if atype == "drag": + path = action.get("path", []) + if isinstance(path, list) and len(path) >= 2: + sx, sy = path[0].get("x"), path[0].get("y") + ex, ey = path[-1].get("x"), path[-1].get("y") + if sx is None or sy is None or ex is None or ey is None: + return None + nsx, nsy = _normalize_xy_to_uitars(int(sx), int(sy), width, height) + nex, ney = _normalize_xy_to_uitars(int(ex), int(ey), width, height) + return { + "function": "drag", + "parameters": { + "start_point": f"{nsx} {nsy}", + "end_point": f"{nex} {ney}", + }, + } + return None + if atype == "wait": + return {"function": "wait", "parameters": {}} + if atype == "screenshot": + return {"function": "take_screenshot", "parameters": {}} + # Fallback unknown + return None + + +def _to_uitars_messages( + messages: List[Dict[str, Any]], width: int, height: int +) -> List[Dict[str, Any]]: + """Convert responses items into completion messages tailored for UI-TARS. 
+ + - User content is passed through similar to convert_responses_items_to_completion_messages + - Assistant/tool history is rendered as text with and blocks + """ + uitars_messages: List[Dict[str, Any]] = [] + + def flush_seed_block(pending_think: Optional[str], pending_functions: List[Dict[str, Any]]): + if not pending_think and not pending_functions: + return + parts: List[str] = [] + if pending_think: + parts.append(f" {pending_think} ") + if pending_functions: + inner = [] + for f in pending_functions: + fname = f["function"] + params = f.get("parameters", {}) + param_blocks = [] + for k, v in params.items(): + param_blocks.append(f"{v}") + inner.append(f"{''.join(param_blocks)}") + parts.append(f"{''.join(inner)}") + uitars_messages.append({"role": "assistant", "content": "".join(parts)}) + + # Accumulators for a single assistant seed block + pending_think: Optional[str] = None + pending_functions: List[Dict[str, Any]] = [] + + for msg in messages: + mtype = msg.get("type") + role = msg.get("role") + + # On any user message, flush current assistant block + if role == "user" or mtype == "user": + flush_seed_block(pending_think, pending_functions) + pending_think, pending_functions = None, [] + + content = msg.get("content", "") + if isinstance(content, list): + completion_content = [] + for item in content: + if item.get("type") == "input_image": + completion_content.append( + {"type": "image_url", "image_url": {"url": item.get("image_url")}} + ) + elif item.get("type") in ("input_text", "text"): + completion_content.append({"type": "text", "text": item.get("text")}) + uitars_messages.append({"role": "user", "content": completion_content}) + elif isinstance(content, str): + uitars_messages.append({"role": "user", "content": content}) + continue + + # Reasoning item + if mtype == "reasoning": + # Responses reasoning stores summary list + summary = msg.get("summary", []) + texts = [ + s.get("text", "") + for s in summary + if isinstance(s, dict) and s.get("type") == "summary_text" + ] + if texts: + pending_think = "\n".join([t for t in texts if t]) + continue + + # Computer/tool calls -> map to functions + if mtype == "computer_call": + f = _map_computer_action_to_function(msg.get("action", {}), width, height) + if f: + pending_functions.append(f) + continue + if mtype == "function_call": + # Include custom tools as-is + name = msg.get("name") + try: + args_obj = json.loads(msg.get("arguments", "{}")) + except json.JSONDecodeError: + args_obj = {} + # Ensure string values + params = {k: (str(v) if not isinstance(v, str) else v) for k, v in args_obj.items()} + pending_functions.append({"function": name, "parameters": params}) + continue + + # If assistant message text is given, flush current block and add as plain assistant text + if role == "assistant" or mtype == "message": + flush_seed_block(pending_think, pending_functions) + pending_think, pending_functions = None, [] + content = msg.get("content", []) + if isinstance(content, list): + texts = [ + c.get("text", "") + for c in content + if isinstance(c, dict) and c.get("type") in ("output_text", "text") + ] + if texts: + uitars_messages.append( + {"role": "assistant", "content": "\n".join([t for t in texts if t])} + ) + elif isinstance(content, str) and content: + uitars_messages.append({"role": "assistant", "content": content}) + continue + + # On outputs, flush pending assistant block and send outputs as user messages + if mtype in ("function_call_output", "computer_call_output"): + flush_seed_block(pending_think, 
pending_functions) + pending_think, pending_functions = None, [] + output = msg.get("output") + if isinstance(output, dict) and output.get("type") == "input_image": + img_url = output.get("image_url") + if img_url: + uitars_messages.append( + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": img_url}}, + ], + } + ) + elif isinstance(output, str): + uitars_messages.append({"role": "user", "content": output}) + else: + # Fallback stringify + uitars_messages.append({"role": "user", "content": json.dumps(output)}) + continue + + # Flush any remaining pending seed block + flush_seed_block(pending_think, pending_functions) + + return uitars_messages + + +def _to_response_items( + actions: List[Dict[str, Any]], + tool_names: Optional[set[str]] = None, + width: Optional[int] = None, + height: Optional[int] = None, +) -> List[Any]: + """Map parsed actions into Responses items (computer actions + optional reasoning).""" + items: List[Any] = [] + tool_names = tool_names or set() + + # Optional top-level reasoning attached to first + if actions and actions[0].get("reasoning"): + items.append(make_reasoning_item(actions[0]["reasoning"])) + + # Dimensions default + w = int(width) if width else 1024 + h = int(height) if height else 768 + + for a in actions: + fn = a.get("function") + params = a.get("parameters", {}) + if fn == "reasoning": + items.append(make_reasoning_item(params.get("content", ""))) + elif fn in ("click", "left_double", "right_single"): + # params.point is like: x y or plain "x y" + point = params.get("point", "").strip() + m = re.search(r"([\-\d\.]+)\s+([\-\d\.]+)", point) + if not m: + continue + nx = float(m.group(1)) + ny = float(m.group(2)) + x, y = _denormalize_xy_from_uitars(nx, ny, w, h) + if fn == "left_double": + items.append(make_double_click_item(x, y)) + elif fn == "right_single": + items.append(make_click_item(x, y, "right")) + else: + items.append(make_click_item(x, y, "left")) + elif fn == "move_to": + point = params.get("point", "").strip() + m = re.search(r"([\-\d\.]+)\s+([\-\d\.]+)", point) + if not m: + continue + nx = float(m.group(1)) + ny = float(m.group(2)) + x, y = _denormalize_xy_from_uitars(nx, ny, w, h) + items.append(make_move_item(x, y)) + elif fn == "drag": + sp = params.get("start_point", "").strip() + ep = params.get("end_point", "").strip() + ms = re.search(r"([\-\d\.]+)\s+([\-\d\.]+)", sp) + me = re.search(r"([\-\d\.]+)\s+([\-\d\.]+)", ep) + if not (ms and me): + continue + nsx, nsy = float(ms.group(1)), float(ms.group(2)) + nex, ney = float(me.group(1)), float(me.group(2)) + sx, sy = _denormalize_xy_from_uitars(nsx, nsy, w, h) + ex, ey = _denormalize_xy_from_uitars(nex, ney, w, h) + items.append(make_drag_item([{"x": sx, "y": sy}, {"x": ex, "y": ey}])) + elif fn == "hotkey": + key = params.get("key", "") + keys = key.split() + if keys: + items.append(make_keypress_item(keys)) + elif fn == "press": + key = params.get("key", "") + if key: + items.append(make_keypress_item([key])) + elif fn == "type": + content = params.get("content", "") + items.append(make_type_item(content)) + elif fn == "scroll": + # direction: up/down/left/right. 
Point optional + direction = params.get("direction", "down").lower() + point = params.get("point", "") + m = re.search(r"([\-\d\.]+)\s+([\-\d\.]+)", point) + if m: + nx = float(m.group(1)) + ny = float(m.group(2)) + x, y = _denormalize_xy_from_uitars(nx, ny, w, h) + else: + x, y = _denormalize_xy_from_uitars(500.0, 500.0, w, h) + dy = 5 if direction == "up" else -5 + dx = 5 if direction == "right" else (-5 if direction == "left" else 0) + items.append(make_scroll_item(x, y, dx, dy)) + elif fn == "wait": + items.append(make_wait_item()) + elif fn == "finished": + content = params.get("content", "") + items.append(make_output_text_item(content or "Task completed.")) + break + elif fn == "take_screenshot": + items.append(make_screenshot_item()) + elif fn == "open_computer": + items.append(make_screenshot_item()) + else: + # If this function name is present in provided tool schemas, emit function_call + if fn in tool_names: + # Convert simple string params into an arguments object + # Parameters are strings; pass through as-is + items.append(make_function_call_item(fn, params)) + else: + # Unknown function -> surface as assistant text + items.append(make_output_text_item(f"Unknown action: {fn} {params}")) + + return items + + +@register_agent(models=r"(?i).*ui-?tars-?2.*") +class UITARS2Config: + async def predict_step( + self, + messages: List[Dict[str, Any]], + model: str, + tools: Optional[List[Dict[str, Any]]] = None, + max_retries: Optional[int] = None, + stream: bool = False, + computer_handler=None, + use_prompt_caching: Optional[bool] = False, + _on_api_start=None, + _on_api_end=None, + _on_usage=None, + _on_screenshot=None, + **kwargs, + ) -> Dict[str, Any]: + # Determine screen dimensions (prefer computer_handler, fallback to last screenshot) + width: Optional[int] = None + height: Optional[int] = None + if computer_handler is not None and hasattr(computer_handler, "get_dimensions"): + try: + dims = await computer_handler.get_dimensions() # type: ignore + if isinstance(dims, (list, tuple)) and len(dims) == 2: + width, height = int(dims[0]), int(dims[1]) + except Exception: + pass + + if width is None or height is None: + try: + last_out = get_last_computer_call_output(messages) # type: ignore + if last_out: + image_url = last_out.get("output", {}).get("image_url", "") + if image_url: + b64 = image_url.split(",")[-1] + img_bytes = base64.b64decode(b64) + if Image is not None: + img = Image.open(io.BytesIO(img_bytes)) + width, height = img.size + except Exception: + pass + + if width is None or height is None: + width, height = 1024, 768 + + # Convert Responses items to UI-TARS style messages with history + completion_messages = _to_uitars_messages(messages, width, height) + + # Build dynamic system prompt by concatenating built-in schemas and provided function tools + provided_fn_schemas = _extract_function_schemas_from_tools(tools) + combined_schemas = ( + TOOL_SCHEMAS + provided_fn_schemas if provided_fn_schemas else TOOL_SCHEMAS + ) + dynamic_system_prompt = ( + _PROMPT_PREFIX + _format_tool_schemas_json_lines(combined_schemas) + _PROMPT_SUFFIX + ) + + # Prepend system prompt (based on training prompts + provided tools) + litellm_messages: List[Dict[str, Any]] = [ + {"role": "system", "content": dynamic_system_prompt}, + ] + litellm_messages.extend(completion_messages) + + api_kwargs: Dict[str, Any] = { + "model": model, + "messages": litellm_messages, + "max_retries": max_retries, + "stream": stream, + **{k: v for k, v in kwargs.items()}, + } + if use_prompt_caching: + 
api_kwargs["use_prompt_caching"] = use_prompt_caching + + if _on_api_start: + await _on_api_start(api_kwargs) + + response = await litellm.acompletion(**api_kwargs) + + if _on_api_end: + await _on_api_end(api_kwargs, response) + + usage = { + **LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage( # type: ignore + response.usage + ).model_dump(), + "response_cost": response._hidden_params.get("response_cost", 0.0), + } + if _on_usage: + await _on_usage(usage) + + # Extract text content (first choice) + response_dict = response.model_dump() # type: ignore + content_text = "" + choices = response_dict.get("choices", []) + if choices: + msg = choices[0].get("message", {}) + # message.content may be string or array; gather text pieces + mc = msg.get("content") + if isinstance(mc, str): + content_text = mc + elif isinstance(mc, list): + parts = [] + for part in mc: + if isinstance(part, dict) and part.get("type") == "text": + parts.append(part.get("text", "")) + content_text = "\n".join([p for p in parts if p]) + + # Parse the seed tool calls and map to response items + actions = _parse_seed_tool_calls(content_text) + # Build set of tool names from provided tools to emit function_call items + tool_names: set[str] = set() + for s in provided_fn_schemas: + name = s.get("name") + if isinstance(name, str): + tool_names.add(name) + output_items = _to_response_items(actions, tool_names, width, height) + + return {"output": output_items, "usage": usage} + + def get_capabilities(self) -> List[AgentCapability]: + return ["step"] + + async def predict_click( + self, model: str, image_b64: str, instruction: str, **kwargs + ) -> Optional[Tuple[int, int]]: + """Predict a single click coordinate using a minimal prompt with a click tool. + + This sends the current screenshot and instruction, asking the model to + output a click action in the form: + Action: click(point='(x,y)') + """ + # Minimal grounding-style prompt + system_text = ( + "You are a GUI agent. 
Given the instruction, return a single action on the current screen.\n\n" + "## Output Format\n\n" + "Action: click(point='(x,y)')\n\n" + "## User Instruction\n" + f"{instruction}" + ) + + # Build messages with image + litellm_messages: List[Dict[str, Any]] = [ + {"role": "system", "content": system_text}, + { + "role": "user", + "content": [ + {"type": "text", "text": "Please return a single click action."}, + { + "type": "image_url", + "image_url": {"url": f"data:image/png;base64,{image_b64}"}, + }, + ], + }, + ] + + api_kwargs: Dict[str, Any] = { + "model": model, + "messages": litellm_messages, + "max_tokens": kwargs.get("max_tokens", 512), + "temperature": kwargs.get("temperature", 0.0), + "do_sample": kwargs.get("temperature", 0.0) > 0.0, + } + api_kwargs.update( + {k: v for k, v in (kwargs or {}).items() if k not in ["max_tokens", "temperature"]} + ) + + response = await litellm.acompletion(**api_kwargs) + # Extract response content + response_dict = response.model_dump() # type: ignore + choices = response_dict.get("choices", []) + if not choices: + return None + msg = choices[0].get("message", {}) + content_text = msg.get("content", "") + if isinstance(content_text, list): + text_parts = [ + p.get("text", "") + for p in content_text + if isinstance(p, dict) and p.get("type") == "text" + ] + content_text = "\n".join([t for t in text_parts if t]) + if not isinstance(content_text, str): + return None + + # Parse coordinates + # Pattern for click(point='(x,y)') or click(start_box='(x,y)') + patterns = [ + r"click\(point='\((\d+),(\d+)\)'\)", + r"click\((?:start_box|point)='\((\d+),(\d+)\)'\)", + ] + for pat in patterns: + m = re.search(pat, content_text) + if m: + try: + x, y = int(m.group(1)), int(m.group(2)) + return (x, y) + except Exception: + pass + return None diff --git a/libs/python/agent/agent/proxy/examples.py b/libs/python/agent/agent/proxy/examples.py index dfe6b87c..67aa8fb0 100644 --- a/libs/python/agent/agent/proxy/examples.py +++ b/libs/python/agent/agent/proxy/examples.py @@ -22,14 +22,14 @@ async def test_http_endpoint(): # Example 1: Simple text request simple_request = { - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": "Tell me a three sentence bedtime story about a unicorn.", "env": {"ANTHROPIC_API_KEY": anthropic_api_key}, } # Example 2: Multi-modal request with image multimodal_request = { - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": [ { "role": "user", @@ -47,7 +47,7 @@ async def test_http_endpoint(): # Example 3: Request with custom agent and computer kwargs custom_request = { - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": "Take a screenshot and tell me what you see", "env": {"ANTHROPIC_API_KEY": anthropic_api_key}, } @@ -95,7 +95,7 @@ def curl_examples(): """curl http://localhost:8000/responses \\ -H "Content-Type: application/json" \\ -d '{ - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": "Tell me a three sentence bedtime story about a unicorn." 
}'""" ) @@ -105,7 +105,7 @@ def curl_examples(): """curl http://localhost:8000/responses \\ -H "Content-Type: application/json" \\ -d '{ - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": [ { "role": "user", @@ -126,7 +126,7 @@ def curl_examples(): """curl http://localhost:8000/responses \\ -H "Content-Type: application/json" \\ -d '{ - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": "Take a screenshot and tell me what you see", "agent_kwargs": { "save_trajectory": true, @@ -166,7 +166,7 @@ async def test_p2p_client(): # Send a test request request = { - "model": "anthropic/claude-3-5-sonnet-20241022", + "model": "anthropic/claude-sonnet-4-5-20250929", "input": "Hello from P2P client!", } await connection.send(json.dumps(request)) diff --git a/libs/python/agent/agent/responses.py b/libs/python/agent/agent/responses.py index bbb1975d..1018021c 100644 --- a/libs/python/agent/agent/responses.py +++ b/libs/python/agent/agent/responses.py @@ -442,7 +442,9 @@ def get_all_element_descriptions(responses_items: List[Dict[str, Any]]) -> List[ # Conversion functions between responses_items and completion messages formats def convert_responses_items_to_completion_messages( - messages: List[Dict[str, Any]], allow_images_in_tool_results: bool = True + messages: List[Dict[str, Any]], + allow_images_in_tool_results: bool = True, + send_multiple_user_images_per_parallel_tool_results: bool = False, ) -> List[Dict[str, Any]]: """Convert responses_items message format to liteLLM completion format. @@ -450,10 +452,11 @@ def convert_responses_items_to_completion_messages( messages: List of responses_items format messages allow_images_in_tool_results: If True, include images in tool role messages. If False, send tool message + separate user message with image. + send_multiple_user_images_per_parallel_tool_results: If True, send multiple user images in parallel tool results. """ completion_messages = [] - for message in messages: + for i, message in enumerate(messages): msg_type = message.get("type") role = message.get("role") @@ -561,6 +564,14 @@ def convert_responses_items_to_completion_messages( } ) else: + # Determine if the next message is also a tool call output + next_type = None + if i + 1 < len(messages): + next_msg = messages[i + 1] + next_type = next_msg.get("type") + is_next_message_image_result = next_type in [ + "computer_call_output", + ] # Send tool message + separate user message with image (OpenAI compatible) completion_messages += [ { @@ -574,6 +585,12 @@ def convert_responses_items_to_completion_messages( {"type": "image_url", "image_url": {"url": output.get("image_url")}} ], }, + ] if send_multiple_user_images_per_parallel_tool_results or (not is_next_message_image_result) else [ + { + "role": "tool", + "tool_call_id": call_id, + "content": "[Execution completed. See screenshot below]", + }, ] else: # Handle text output as tool response diff --git a/libs/python/agent/agent/ui/gradio/app.py b/libs/python/agent/agent/ui/gradio/app.py index 1a2fb023..cb923bf5 100644 --- a/libs/python/agent/agent/ui/gradio/app.py +++ b/libs/python/agent/agent/ui/gradio/app.py @@ -6,9 +6,9 @@ with an advanced UI for model selection and configuration. 
Supported Agent Models: - OpenAI: openai/computer-use-preview -- Anthropic: anthropic/claude-3-5-sonnet-20241022, anthropic/claude-3-7-sonnet-20250219 +- Anthropic: anthropic/claude-sonnet-4-5-20250929, anthropic/claude-3-7-sonnet-20250219 - UI-TARS: huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B -- Omniparser: omniparser+anthropic/claude-3-5-sonnet-20241022, omniparser+ollama_chat/gemma3 +- Omniparser: omniparser+anthropic/claude-sonnet-4-5-20250929, omniparser+ollama_chat/gemma3 Requirements: - Mac with Apple Silicon (M1/M2/M3/M4), Linux, or Windows @@ -116,14 +116,12 @@ MODEL_MAPPINGS = { "Anthropic: Claude 4 Opus (20250514)": "anthropic/claude-opus-4-20250514", "Anthropic: Claude 4 Sonnet (20250514)": "anthropic/claude-sonnet-4-20250514", "Anthropic: Claude 3.7 Sonnet (20250219)": "anthropic/claude-3-7-sonnet-20250219", - "Anthropic: Claude 3.5 Sonnet (20241022)": "anthropic/claude-3-5-sonnet-20241022", }, "omni": { "default": "omniparser+openai/gpt-4o", "OMNI: OpenAI GPT-4o": "omniparser+openai/gpt-4o", "OMNI: OpenAI GPT-4o mini": "omniparser+openai/gpt-4o-mini", "OMNI: Claude 3.7 Sonnet (20250219)": "omniparser+anthropic/claude-3-7-sonnet-20250219", - "OMNI: Claude 3.5 Sonnet (20241022)": "omniparser+anthropic/claude-3-5-sonnet-20241022", }, "uitars": { "default": "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B" if is_mac else "ui-tars", diff --git a/libs/python/agent/agent/ui/gradio/ui_components.py b/libs/python/agent/agent/ui/gradio/ui_components.py index d14f49a9..309dfb6c 100644 --- a/libs/python/agent/agent/ui/gradio/ui_components.py +++ b/libs/python/agent/agent/ui/gradio/ui_components.py @@ -44,13 +44,11 @@ def create_gradio_ui() -> gr.Blocks: "Anthropic: Claude 4 Opus (20250514)", "Anthropic: Claude 4 Sonnet (20250514)", "Anthropic: Claude 3.7 Sonnet (20250219)", - "Anthropic: Claude 3.5 Sonnet (20241022)", ] omni_models = [ "OMNI: OpenAI GPT-4o", "OMNI: OpenAI GPT-4o mini", "OMNI: Claude 3.7 Sonnet (20250219)", - "OMNI: Claude 3.5 Sonnet (20241022)", ] # Check if API keys are available diff --git a/libs/python/agent/example.py b/libs/python/agent/example.py index b02ccbfd..b8f41083 100644 --- a/libs/python/agent/example.py +++ b/libs/python/agent/example.py @@ -102,7 +102,7 @@ async def main(): # model="anthropic/claude-opus-4-20250514", # model="anthropic/claude-sonnet-4-20250514", # model="anthropic/claude-3-7-sonnet-20250219", - # model="anthropic/claude-3-5-sonnet-20241022", + # model="anthropic/claude-sonnet-4-5-20250929", # == UI-TARS == # model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", # TODO: add local mlx provider diff --git a/libs/python/agent/pyproject.toml b/libs/python/agent/pyproject.toml index fbb4bc9b..8e187716 100644 --- a/libs/python/agent/pyproject.toml +++ b/libs/python/agent/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "pdm.backend" [project] name = "cua-agent" -version = "0.4.37" +version = "0.5.1" description = "CUA (Computer Use) Agent for AI-driven computer interaction" readme = "README.md" authors = [ @@ -100,8 +100,6 @@ all = [ "python-dotenv>=1.0.1", # cli requirements "yaspin>=3.1.0", - # hud requirements - "hud-python==0.4.52", # gemini requirements "google-genai>=1.41.0", # qwen requirements diff --git a/libs/python/agent/tests/conftest.py b/libs/python/agent/tests/conftest.py index 8270c8e0..d60c1c54 100644 --- a/libs/python/agent/tests/conftest.py +++ b/libs/python/agent/tests/conftest.py @@ -24,7 +24,7 @@ def mock_litellm(): "id": "chatcmpl-test123", "object": "chat.completion", "created": 1234567890, - "model": 
kwargs.get("model", "anthropic/claude-3-5-sonnet-20241022"), + "model": kwargs.get("model", "anthropic/claude-sonnet-4-5-20250929"), "choices": [ { "index": 0, diff --git a/libs/python/agent/tests/test_computer_agent.py b/libs/python/agent/tests/test_computer_agent.py index 936c984c..b6de1e86 100644 --- a/libs/python/agent/tests/test_computer_agent.py +++ b/libs/python/agent/tests/test_computer_agent.py @@ -18,18 +18,18 @@ class TestComputerAgentInitialization: """Test that agent can be initialized with a model string.""" from agent import ComputerAgent - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022") + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929") assert agent is not None assert hasattr(agent, "model") - assert agent.model == "anthropic/claude-3-5-sonnet-20241022" + assert agent.model == "anthropic/claude-sonnet-4-5-20250929" @patch("agent.agent.litellm") def test_agent_initialization_with_tools(self, mock_litellm, disable_telemetry, mock_computer): """Test that agent can be initialized with tools.""" from agent import ComputerAgent - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022", tools=[mock_computer]) + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929", tools=[mock_computer]) assert agent is not None assert hasattr(agent, "tools") @@ -41,7 +41,7 @@ class TestComputerAgentInitialization: budget = 5.0 agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", max_trajectory_budget=budget + model="anthropic/claude-sonnet-4-5-20250929", max_trajectory_budget=budget ) assert agent is not None @@ -79,7 +79,7 @@ class TestComputerAgentRun: mock_litellm.acompletion = AsyncMock(return_value=mock_response) - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022") + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929") # Run should return an async generator result_generator = agent.run(sample_messages) @@ -92,7 +92,7 @@ class TestComputerAgentRun: """Test that agent has run method available.""" from agent import ComputerAgent - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022") + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929") # Verify run method exists assert hasattr(agent, "run") @@ -102,7 +102,7 @@ class TestComputerAgentRun: """Test that agent has agent_loop initialized.""" from agent import ComputerAgent - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022") + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929") # Verify agent_loop is initialized assert hasattr(agent, "agent_loop") @@ -132,7 +132,7 @@ class TestComputerAgentIntegration: """Test that agent can be initialized with Computer tool.""" from agent import ComputerAgent - agent = ComputerAgent(model="anthropic/claude-3-5-sonnet-20241022", tools=[mock_computer]) + agent = ComputerAgent(model="anthropic/claude-sonnet-4-5-20250929", tools=[mock_computer]) # Verify agent accepted the tool assert agent is not None diff --git a/libs/python/computer-server/.bumpversion.cfg b/libs/python/computer-server/.bumpversion.cfg index baba7c21..b668353e 100644 --- a/libs/python/computer-server/.bumpversion.cfg +++ b/libs/python/computer-server/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.29 +current_version = 0.1.30 commit = True tag = True tag_name = computer-server-v{new_version} diff --git a/libs/python/computer-server/README.md b/libs/python/computer-server/README.md index 567af7d4..7356bec3 100644 --- 
a/libs/python/computer-server/README.md +++ b/libs/python/computer-server/README.md @@ -36,11 +36,11 @@ pip install cua-computer-server Refer to this notebook for a step-by-step guide on how to use the Computer-Use Server on the host system or VM: -- [Computer-Use Server](../../notebooks/computer_server_nb.ipynb) +- [Computer-Use Server](../../../notebooks/computer_server_nb.ipynb) ## Docs - [Commands](https://cua.ai/docs/libraries/computer-server/Commands) - [REST-API](https://cua.ai/docs/libraries/computer-server/REST-API) - [WebSocket-API](https://cua.ai/docs/libraries/computer-server/WebSocket-API) -- [Index](https://cua.ai/docs/libraries/computer-server/index) +- [Index](https://cua.ai/docs/libraries/computer-server) diff --git a/libs/python/computer-server/computer_server/handlers/macos.py b/libs/python/computer-server/computer_server/handlers/macos.py index ce341668..6a831c17 100644 --- a/libs/python/computer-server/computer_server/handlers/macos.py +++ b/libs/python/computer-server/computer_server/handlers/macos.py @@ -1287,7 +1287,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): if not isinstance(screenshot, Image.Image): return {"success": False, "error": "Failed to capture screenshot"} + # Resize image to reduce size (max width 1920, maintain aspect ratio) + max_width = 1920 + if screenshot.width > max_width: + ratio = max_width / screenshot.width + new_height = int(screenshot.height * ratio) + screenshot = screenshot.resize((max_width, new_height), Image.Resampling.LANCZOS) + buffered = BytesIO() + # Use PNG format with optimization to reduce file size screenshot.save(buffered, format="PNG", optimize=True) buffered.seek(0) image_data = base64.b64encode(buffered.getvalue()).decode() diff --git a/libs/python/computer-server/computer_server/watchdog.py b/libs/python/computer-server/computer_server/watchdog.py index 7c9ca83f..50ade796 100644 --- a/libs/python/computer-server/computer_server/watchdog.py +++ b/libs/python/computer-server/computer_server/watchdog.py @@ -75,14 +75,23 @@ class Watchdog: Returns: WebSocket URI for the Computer API Server """ - ip_address = ( - "localhost" - if not self.container_name - else f"{self.container_name}.containers.cloud.trycua.com" - ) - protocol = "wss" if self.container_name else "ws" - port = "8443" if self.container_name else "8000" - return f"{protocol}://{ip_address}:{port}/ws" + if not self.container_name: + return "ws://localhost:8000/ws" + + # Try .sandbox.cua.ai first, fallback to .containers.cloud.trycua.com + return f"wss://{self.container_name}.sandbox.cua.ai:8443/ws" + + @property + def ws_uri_fallback(self) -> str: + """Get the fallback WebSocket URI using legacy hostname. 
+ + Returns: + Fallback WebSocket URI for the Computer API Server + """ + if not self.container_name: + return "ws://localhost:8000/ws" + + return f"wss://{self.container_name}.containers.cloud.trycua.com:8443/ws" async def ping(self) -> bool: """ @@ -91,11 +100,11 @@ class Watchdog: Returns: True if connection successful, False otherwise """ - try: - # Create a simple ping message - ping_message = {"command": "get_screen_size", "params": {}} + # Create a simple ping message + ping_message = {"command": "get_screen_size", "params": {}} - # Try to connect to the WebSocket + # Try primary URI first (.sandbox.cua.ai) + try: async with websockets.connect( self.ws_uri, max_size=1024 * 1024 * 10 # 10MB limit to match server ) as websocket: @@ -105,13 +114,40 @@ class Watchdog: # Wait for any response or just close try: response = await asyncio.wait_for(websocket.recv(), timeout=5) - logger.debug(f"Ping response received: {response[:100]}...") + logger.debug(f"Ping response received from primary URI: {response[:100]}...") return True except asyncio.TimeoutError: return False except Exception as e: - logger.warning(f"Ping failed: {e}") - return False + logger.debug(f"Primary URI ping failed: {e}") + + # Try fallback URI (.containers.cloud.trycua.com) + if self.container_name: + try: + async with websockets.connect( + self.ws_uri_fallback, + max_size=1024 * 1024 * 10, # 10MB limit to match server + ) as websocket: + # Send ping message + await websocket.send(json.dumps(ping_message)) + + # Wait for any response or just close + try: + response = await asyncio.wait_for(websocket.recv(), timeout=5) + logger.debug( + f"Ping response received from fallback URI: {response[:100]}..." + ) + return True + except asyncio.TimeoutError: + return False + except Exception as fallback_e: + logger.warning( + f"Both primary and fallback ping failed. 
Primary: {e}, Fallback: {fallback_e}" + ) + return False + else: + logger.warning(f"Ping failed: {e}") + return False def kill_processes_on_port(self, port: int) -> bool: """ diff --git a/libs/python/computer-server/pyproject.toml b/libs/python/computer-server/pyproject.toml index c8fc81a8..7bae1e06 100644 --- a/libs/python/computer-server/pyproject.toml +++ b/libs/python/computer-server/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "pdm.backend" [project] name = "cua-computer-server" -version = "0.1.29" +version = "0.1.30" description = "Server component for the Computer-Use Interface (CUI) framework powering Cua" authors = [ diff --git a/libs/python/computer/.bumpversion.cfg b/libs/python/computer/.bumpversion.cfg index be266fe6..44e37d84 100644 --- a/libs/python/computer/.bumpversion.cfg +++ b/libs/python/computer/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.4.11 +current_version = 0.4.17 commit = True tag = True tag_name = computer-v{new_version} diff --git a/libs/python/computer/README.md b/libs/python/computer/README.md index 2cead0cc..d1179b63 100644 --- a/libs/python/computer/README.md +++ b/libs/python/computer/README.md @@ -40,7 +40,7 @@ try: await computer.interface.right_click(300, 300) await computer.interface.double_click(400, 400) - await computer.interface.type("Hello, World!") + await computer.interface.type_text("Hello, World!") await computer.interface.press_key("enter") await computer.interface.set_clipboard("Test clipboard") diff --git a/libs/python/computer/computer/computer.py b/libs/python/computer/computer/computer.py index 8c88f73d..c0c273b6 100644 --- a/libs/python/computer/computer/computer.py +++ b/libs/python/computer/computer/computer.py @@ -107,13 +107,17 @@ class Computer: host: Host to use for VM provider connections (e.g. "localhost", "host.docker.internal") storage: Optional path for persistent VM storage (Lumier provider) ephemeral: Whether to use ephemeral storage - api_key: Optional API key for cloud providers + api_key: Optional API key for cloud providers (defaults to CUA_API_KEY environment variable) experiments: Optional list of experimental features to enable (e.g. 
["app-use"]) """ self.logger = Logger("computer", verbosity) self.logger.info("Initializing Computer...") + # Fall back to environment variable for api_key if not provided + if api_key is None: + api_key = os.environ.get("CUA_API_KEY") + if not image: if os_type == "macos": image = "macos-sequoia-cua:latest" diff --git a/libs/python/computer/computer/providers/cloud/provider.py b/libs/python/computer/computer/providers/cloud/provider.py index 7d479686..5d2bf200 100644 --- a/libs/python/computer/computer/providers/cloud/provider.py +++ b/libs/python/computer/computer/providers/cloud/provider.py @@ -31,21 +31,26 @@ class CloudProvider(BaseVMProvider): def __init__( self, - api_key: str, + api_key: Optional[str] = None, verbose: bool = False, api_base: Optional[str] = None, **kwargs, ): """ Args: - api_key: API key for authentication + api_key: API key for authentication (defaults to CUA_API_KEY environment variable) name: Name of the VM verbose: Enable verbose logging """ - assert api_key, "api_key required for CloudProvider" + # Fall back to environment variable if api_key not provided + if api_key is None: + api_key = os.getenv("CUA_API_KEY") + assert api_key, "api_key required for CloudProvider (provide via parameter or CUA_API_KEY environment variable)" self.api_key = api_key self.verbose = verbose self.api_base = (api_base or DEFAULT_API_BASE).rstrip("/") + # Host caching dictionary: {vm_name: host_string} + self._host_cache: Dict[str, str] = {} @property def provider_type(self) -> VMProviderType: @@ -60,12 +65,12 @@ class CloudProvider(BaseVMProvider): async def get_vm(self, name: str, storage: Optional[str] = None) -> Dict[str, Any]: """Get VM information by querying the VM status endpoint. - - Build hostname via get_ip(name) β†’ "{name}.containers.cloud.trycua.com" + - Build hostname via _get_host_for_vm(name) using cached host or fallback - Probe https://{hostname}:8443/status with a short timeout - If JSON contains a "status" field, return it; otherwise infer - Fallback to DNS resolve check to distinguish unknown vs not_found """ - hostname = await self.get_ip(name=name) + hostname = await self._get_host_for_vm(name) # Try HTTPS probe to the computer-server status endpoint (8443) try: @@ -118,8 +123,20 @@ class CloudProvider(BaseVMProvider): vm = dict(item) if isinstance(item, dict) else {} name = vm.get("name") password = vm.get("password") + api_host = vm.get("host") # Read host from API response + if isinstance(name, str) and name: - host = f"{name}.containers.cloud.trycua.com" + # Use host from API if available, otherwise fallback to legacy format + if isinstance(api_host, str) and api_host: + host = api_host + # Cache the host for this VM + self._host_cache[name] = host + else: + # Legacy fallback + host = f"{name}.containers.cloud.trycua.com" + # Cache the legacy host + self._host_cache[name] = host + # api_url: always set if missing if not vm.get("api_url"): vm["api_url"] = f"https://{host}:8443" @@ -227,15 +244,73 @@ class CloudProvider(BaseVMProvider): "message": "update_vm not supported by public API", } + async def _get_host_for_vm(self, name: str) -> str: + """ + Get the host for a VM, trying multiple approaches: + 1. Check cache first + 2. Try to refresh cache by calling list_vms + 3. Try .sandbox.cua.ai format + 4. 
Fallback to legacy .containers.cloud.trycua.com format + + Args: + name: VM name + + Returns: + Host string for the VM + """ + # Check cache first + if name in self._host_cache: + return self._host_cache[name] + + # Try to refresh cache by calling list_vms + try: + await self.list_vms() + # Check cache again after refresh + if name in self._host_cache: + return self._host_cache[name] + except Exception as e: + logger.warning(f"Failed to refresh VM list for host lookup: {e}") + + # Try .sandbox.cua.ai format first + sandbox_host = f"{name}.sandbox.cua.ai" + if await self._test_host_connectivity(sandbox_host): + self._host_cache[name] = sandbox_host + return sandbox_host + + # Fallback to legacy format + legacy_host = f"{name}.containers.cloud.trycua.com" + # Cache the legacy host + self._host_cache[name] = legacy_host + return legacy_host + + async def _test_host_connectivity(self, hostname: str) -> bool: + """ + Test if a host is reachable by trying to connect to its status endpoint. + + Args: + hostname: Host to test + + Returns: + True if host is reachable, False otherwise + """ + try: + timeout = aiohttp.ClientTimeout(total=2) # Short timeout for connectivity test + async with aiohttp.ClientSession(timeout=timeout) as session: + url = f"https://{hostname}:8443/status" + async with session.get(url, allow_redirects=False) as resp: + # Any response (even error) means the host is reachable + return True + except Exception: + return False + async def get_ip( self, name: Optional[str] = None, storage: Optional[str] = None, retry_delay: int = 2 ) -> str: """ - Return the VM's IP address as '{container_name}.containers.cloud.trycua.com'. - Uses the provided 'name' argument (the VM name requested by the caller), - falling back to self.name only if 'name' is None. - Retries up to 3 times with retry_delay seconds if hostname is not available. + Return the VM's host address, trying to use cached host from API or falling back to legacy format. + Uses the provided 'name' argument (the VM name requested by the caller). """ if name is None: raise ValueError("VM name is required for CloudProvider.get_ip") - return f"{name}.containers.cloud.trycua.com" + + return await self._get_host_for_vm(name) diff --git a/libs/python/computer/pyproject.toml b/libs/python/computer/pyproject.toml index 1e4f49c4..b3aca539 100644 --- a/libs/python/computer/pyproject.toml +++ b/libs/python/computer/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "pdm.backend" [project] name = "cua-computer" -version = "0.4.11" +version = "0.4.17" description = "Computer-Use Interface (CUI) framework powering Cua" readme = "README.md" authors = [ diff --git a/libs/python/core/core/telemetry/posthog.py b/libs/python/core/core/telemetry/posthog.py index 5edd7343..29f5a661 100644 --- a/libs/python/core/core/telemetry/posthog.py +++ b/libs/python/core/core/telemetry/posthog.py @@ -44,13 +44,12 @@ class PostHogTelemetryClient: @classmethod def is_telemetry_enabled(cls) -> bool: """True if telemetry is currently active for this process.""" - return ( - # Legacy opt-out flag - os.environ.get("CUA_TELEMETRY", "").lower() != "off" - # Opt-in flag (defaults to enabled) - and os.environ.get("CUA_TELEMETRY_ENABLED", "true").lower() - in {"1", "true", "yes", "on"} - ) + return os.environ.get("CUA_TELEMETRY_ENABLED", "true").lower() in { + "1", + "true", + "yes", + "on", + } def _get_or_create_installation_id(self) -> str: """Get or create a unique installation ID that persists across runs. 
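The telemetry change above keeps only the CUA_TELEMETRY_ENABLED opt-out (default on) and drops the legacy CUA_TELEMETRY=off flag. A minimal standalone sketch of the resulting behavior, for illustration only — it reimplements the predicate shown in the diff rather than importing core.telemetry, and the helper name telemetry_enabled is hypothetical:

import os

def telemetry_enabled() -> bool:
    # Opt-in flag, defaults to enabled; any value outside the accepted set disables telemetry
    return os.environ.get("CUA_TELEMETRY_ENABLED", "true").lower() in {"1", "true", "yes", "on"}

# Unset, or set to "1"/"true"/"yes"/"on" (any case): telemetry stays enabled.
# Set to "false", "0", "no", "off", or any other value: telemetry is disabled.
# The legacy CUA_TELEMETRY=off switch no longer has any effect after this change.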
diff --git a/libs/python/core/tests/test_telemetry.py b/libs/python/core/tests/test_telemetry.py index 5446a884..93f4ae3d 100644 --- a/libs/python/core/tests/test_telemetry.py +++ b/libs/python/core/tests/test_telemetry.py @@ -24,15 +24,7 @@ class TestTelemetryEnabled: assert is_telemetry_enabled() is True - def test_telemetry_disabled_with_legacy_flag(self, monkeypatch): - """Test that telemetry can be disabled with legacy CUA_TELEMETRY=off.""" - monkeypatch.setenv("CUA_TELEMETRY", "off") - - from core.telemetry import is_telemetry_enabled - - assert is_telemetry_enabled() is False - - def test_telemetry_disabled_with_new_flag(self, monkeypatch): + def test_telemetry_disabled_with_flag(self, monkeypatch): """Test that telemetry can be disabled with CUA_TELEMETRY_ENABLED=false.""" monkeypatch.setenv("CUA_TELEMETRY_ENABLED", "false") diff --git a/libs/python/mcp-server/CONCURRENT_SESSIONS.md b/libs/python/mcp-server/CONCURRENT_SESSIONS.md index 62e63dd2..f3e16099 100644 --- a/libs/python/mcp-server/CONCURRENT_SESSIONS.md +++ b/libs/python/mcp-server/CONCURRENT_SESSIONS.md @@ -133,7 +133,7 @@ await cleanup_session(ctx, "session-to-cleanup") ### Environment Variables -- `CUA_MODEL_NAME`: Model to use (default: `anthropic/claude-3-5-sonnet-20241022`) +- `CUA_MODEL_NAME`: Model to use (default: `anthropic/claude-sonnet-4-5-20250929`) - `CUA_MAX_IMAGES`: Maximum images to keep (default: `3`) ### Session Manager Configuration diff --git a/libs/python/mcp-server/QUICK_TEST_COMMANDS.sh b/libs/python/mcp-server/QUICK_TEST_COMMANDS.sh new file mode 100755 index 00000000..3242c610 --- /dev/null +++ b/libs/python/mcp-server/QUICK_TEST_COMMANDS.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Quick Test Commands for MCP Server Local Desktop Option +# Run these commands to test the implementation + +set -e # Exit on error + +echo "======================================================================" +echo "Testing MCP Server Local Desktop Option" +echo "======================================================================" +echo "" + +# Change to repo root +cd "$(dirname "$0")/.." + +# Test 1: Quick Logic Test (No setup required) +echo "Test 1: Quick Logic Test (No setup required)" +echo "----------------------------------------------------------------------" +python tests/quick_test_local_option.py +echo "" + +# Test 2: Automated Tests (Requires pytest and packages) +echo "Test 2: Automated Tests (Requires pytest and packages installed)" +echo "----------------------------------------------------------------------" +if command -v pytest &> /dev/null; then + echo "Running pytest..." + pytest tests/test_mcp_server_local_option.py -v || echo "Note: Some tests may require full setup" +else + echo "⚠️ pytest not found. Install with: pip install pytest" +fi +echo "" + +# Test 3: Existing MCP server tests +echo "Test 3: Existing MCP Server Tests" +echo "----------------------------------------------------------------------" +if command -v pytest &> /dev/null; then + echo "Running existing session management tests..." + pytest tests/test_mcp_server_session_management.py -v || echo "Note: Some tests may fail if dependencies are missing" +else + echo "⚠️ pytest not found. Install with: pip install pytest" +fi +echo "" + +# Summary +echo "======================================================================" +echo "Test Summary" +echo "======================================================================" +echo "βœ… Quick logic test completed" +echo "" +echo "Next steps for comprehensive testing:" +echo "1. 
Install dependencies:" +echo " pip install -e libs/python/core" +echo " pip install -e libs/python/computer" +echo " pip install -e libs/python/agent" +echo " pip install -e libs/python/mcp-server" +echo " pip install -e libs/python/computer-server" +echo "" +echo "2. For manual end-to-end testing, see:" +echo " tests/MANUAL_TEST_LOCAL_OPTION.md" +echo "" +echo "3. For detailed testing info, see:" +echo " tests/TESTING_SUMMARY.md" +echo "" + diff --git a/libs/python/mcp-server/README.md b/libs/python/mcp-server/README.md index 4c24fd3e..9d8c95af 100644 --- a/libs/python/mcp-server/README.md +++ b/libs/python/mcp-server/README.md @@ -44,7 +44,7 @@ Add this to your MCP client configuration: "args": [ "bash", "-lc", - "export CUA_MODEL_NAME='anthropic/claude-3-5-sonnet-20241022'; ~/.cua/start_mcp_server.sh" + "export CUA_MODEL_NAME='anthropic/claude-sonnet-4-5-20250929'; ~/.cua/start_mcp_server.sh" ] } } diff --git a/libs/python/mcp-server/mcp_server/server.py b/libs/python/mcp-server/mcp_server/server.py index 7d47cfd1..33e97f4c 100644 --- a/libs/python/mcp-server/mcp_server/server.py +++ b/libs/python/mcp-server/mcp_server/server.py @@ -156,7 +156,7 @@ def serve() -> FastMCP: try: # Get model name - model_name = os.getenv("CUA_MODEL_NAME", "anthropic/claude-3-5-sonnet-20241022") + model_name = os.getenv("CUA_MODEL_NAME", "anthropic/claude-sonnet-4-5-20250929") logger.info(f"Using model: {model_name}") # Create agent with the new v0.4.x API diff --git a/libs/python/mcp-server/mcp_server/session_manager.py b/libs/python/mcp-server/mcp_server/session_manager.py index dc8d480b..a415feac 100644 --- a/libs/python/mcp-server/mcp_server/session_manager.py +++ b/libs/python/mcp-server/mcp_server/session_manager.py @@ -10,6 +10,7 @@ This module provides: import asyncio import logging +import os import time import uuid import weakref @@ -57,7 +58,14 @@ class ComputerPool: logger.debug("Creating new computer instance") from computer import Computer - computer = Computer(verbosity=logging.INFO) + # Check if we should use host computer server + use_host = os.getenv("CUA_USE_HOST_COMPUTER_SERVER", "false").lower() in ( + "true", + "1", + "yes", + ) + + computer = Computer(verbosity=logging.INFO, use_host_computer_server=use_host) await computer.run() self._in_use.add(computer) return computer diff --git a/libs/python/mcp-server/quick_test_local_option.py b/libs/python/mcp-server/quick_test_local_option.py new file mode 100755 index 00000000..6e2caab2 --- /dev/null +++ b/libs/python/mcp-server/quick_test_local_option.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python3 +""" +Quick test to verify the local desktop option logic without full setup. + +This script tests the environment variable parsing and logic flow +without requiring VMs, computer-server, or MCP clients to be running. 
+""" + +import os +import sys + + +def test_env_var_parsing(): + """Test that environment variable is parsed correctly.""" + print("Testing CUA_USE_HOST_COMPUTER_SERVER environment variable parsing...") + print("-" * 60) + + test_cases = [ + # (env_value, expected_result, description) + ("true", True, "lowercase 'true'"), + ("True", True, "capitalized 'True'"), + ("TRUE", True, "uppercase 'TRUE'"), + ("1", True, "numeric '1'"), + ("yes", True, "lowercase 'yes'"), + ("Yes", True, "capitalized 'Yes'"), + ("false", False, "lowercase 'false'"), + ("False", False, "capitalized 'False'"), + ("FALSE", False, "uppercase 'FALSE'"), + ("0", False, "numeric '0'"), + ("no", False, "lowercase 'no'"), + ("", False, "empty string"), + ("random", False, "random value"), + (None, False, "not set (None)"), + ] + + passed = 0 + failed = 0 + + for env_value, expected, description in test_cases: + # Simulate the logic from session_manager.py line 59 + if env_value is None: + actual = os.getenv("CUA_USE_HOST_COMPUTER_SERVER", "false").lower() in ( + "true", + "1", + "yes", + ) + else: + os.environ["CUA_USE_HOST_COMPUTER_SERVER"] = env_value + actual = os.getenv("CUA_USE_HOST_COMPUTER_SERVER", "false").lower() in ( + "true", + "1", + "yes", + ) + + status = "βœ“ PASS" if actual == expected else "βœ— FAIL" + if actual == expected: + passed += 1 + else: + failed += 1 + + print( + f"{status} | Value: {env_value!r:15} | Expected: {expected!s:5} | Got: {actual!s:5} | {description}" + ) + + # Clean up + os.environ.pop("CUA_USE_HOST_COMPUTER_SERVER", None) + + print("-" * 60) + print(f"Results: {passed} passed, {failed} failed") + return failed == 0 + + +def test_session_manager_logic(): + """Test the logic flow in session_manager.py without actual Computer creation.""" + print("\nTesting session_manager.py logic flow...") + print("-" * 60) + + # Read the actual session_manager.py to verify the logic + import pathlib + + session_manager_path = ( + pathlib.Path(__file__).parent.parent + / "libs" + / "python" + / "mcp-server" + / "mcp_server" + / "session_manager.py" + ) + + if not session_manager_path.exists(): + print(f"βœ— FAIL | session_manager.py not found at {session_manager_path}") + return False + + content = session_manager_path.read_text() + + # Check for the key logic + checks = [ + ('os.getenv("CUA_USE_HOST_COMPUTER_SERVER"', "Environment variable check present"), + ("use_host_computer_server=use_host", "use_host_computer_server parameter passed"), + ("Computer(", "Computer instantiation present"), + ] + + all_checks_passed = True + for check_str, description in checks: + if check_str in content: + print(f"βœ“ PASS | {description}") + else: + print(f"βœ— FAIL | {description} - not found") + all_checks_passed = False + + print("-" * 60) + return all_checks_passed + + +def test_documentation_consistency(): + """Verify documentation mentions the new feature.""" + print("\nTesting documentation consistency...") + print("-" * 60) + + import pathlib + + docs_to_check = [ + ("configuration.mdx", "CUA_USE_HOST_COMPUTER_SERVER"), + ("usage.mdx", "Targeting Your Local Desktop"), + ] + + docs_path = ( + pathlib.Path(__file__).parent.parent + / "docs" + / "content" + / "docs" + / "libraries" + / "mcp-server" + ) + + all_docs_ok = True + for doc_file, expected_content in docs_to_check: + doc_path = docs_path / doc_file + if not doc_path.exists(): + print(f"βœ— FAIL | {doc_file} not found") + all_docs_ok = False + continue + + content = doc_path.read_text() + if expected_content in content: + print(f"βœ“ PASS | {doc_file} 
contains '{expected_content}'") + else: + print(f"βœ— FAIL | {doc_file} missing '{expected_content}'") + all_docs_ok = False + + print("-" * 60) + return all_docs_ok + + +def print_usage_examples(): + """Print usage examples for both modes.""" + print("\n" + "=" * 60) + print("USAGE EXAMPLES") + print("=" * 60) + + print("\n1. DEFAULT MODE (VM):") + print("-" * 60) + print( + """ +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-5-20250929" + } + } + } +} + +Note: CUA_USE_HOST_COMPUTER_SERVER is not set, so VM mode is used (safe). +""" + ) + + print("\n2. LOCAL DESKTOP MODE:") + print("-" * 60) + print( + """ +Step 1: Start computer-server locally: + python -m computer_server + +Step 2: Configure MCP client: +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-sonnet-4-5-20250929", + "CUA_USE_HOST_COMPUTER_SERVER": "true" + } + } + } +} + +⚠️ WARNING: AI will have direct access to your desktop! +""" + ) + + +def main(): + """Run all quick tests.""" + print("=" * 60) + print("QUICK TEST: MCP Server Local Desktop Option") + print("=" * 60) + print() + + results = [] + + # Run tests + results.append(("Environment Variable Parsing", test_env_var_parsing())) + results.append(("Session Manager Logic", test_session_manager_logic())) + results.append(("Documentation Consistency", test_documentation_consistency())) + + # Print summary + print("\n" + "=" * 60) + print("SUMMARY") + print("=" * 60) + for test_name, passed in results: + status = "βœ“ PASSED" if passed else "βœ— FAILED" + print(f"{status} | {test_name}") + + all_passed = all(result for _, result in results) + + if all_passed: + print("\nπŸŽ‰ All quick tests passed!") + print_usage_examples() + print("\nNext steps:") + print("1. Run full automated tests: pytest tests/test_mcp_server_local_option.py") + print("2. Follow manual testing guide: tests/MANUAL_TEST_LOCAL_OPTION.md") + return 0 + else: + print("\n❌ Some tests failed. Please review the output above.") + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/libs/python/mcp-server/test_mcp_server_local_option.py b/libs/python/mcp-server/test_mcp_server_local_option.py new file mode 100644 index 00000000..b1540726 --- /dev/null +++ b/libs/python/mcp-server/test_mcp_server_local_option.py @@ -0,0 +1,138 @@ +""" +Test script to verify MCP Server local desktop option works correctly. + +This test verifies: +1. Default behavior: Computer uses VM +2. 
New behavior: Computer uses host when CUA_USE_HOST_COMPUTER_SERVER=true +""" + +import asyncio +import os +import sys +from pathlib import Path + +# Add the mcp-server module to path +mcp_server_path = Path(__file__).parent.parent / "libs" / "python" / "mcp-server" +sys.path.insert(0, str(mcp_server_path.parent.parent.parent / "libs" / "python")) + +import pytest + + +@pytest.mark.asyncio +async def test_default_vm_mode(): + """Test that the default mode uses VM (not host computer server).""" + # Ensure environment variable is not set or is false + os.environ.pop("CUA_USE_HOST_COMPUTER_SERVER", None) + + from mcp_server.session_manager import ComputerPool + + pool = ComputerPool(max_size=1) + + try: + computer = await pool.acquire() + + # Verify the computer was initialized + assert computer is not None + + # Check that use_host_computer_server was set to False (default) + # This should start a VM + print("βœ“ Default mode: Computer initialized (VM mode expected)") + + await pool.release(computer) + + finally: + await pool.shutdown() + + +@pytest.mark.asyncio +async def test_local_desktop_mode(): + """Test that setting CUA_USE_HOST_COMPUTER_SERVER=true uses host.""" + # Set environment variable to true + os.environ["CUA_USE_HOST_COMPUTER_SERVER"] = "true" + + # Need to reload module to pick up new env var + import importlib + + import mcp_server.session_manager + from mcp_server.session_manager import ComputerPool + + importlib.reload(mcp_server.session_manager) + + pool = mcp_server.session_manager.ComputerPool(max_size=1) + + try: + computer = await pool.acquire() + + # Verify the computer was initialized + assert computer is not None + + # Check that use_host_computer_server was set to True + print("βœ“ Local mode: Computer initialized (host mode expected)") + + await pool.release(computer) + + finally: + await pool.shutdown() + # Clean up env var + os.environ.pop("CUA_USE_HOST_COMPUTER_SERVER", None) + + +@pytest.mark.asyncio +async def test_env_var_parsing(): + """Test that various values of CUA_USE_HOST_COMPUTER_SERVER are parsed correctly.""" + test_cases = [ + ("true", True), + ("True", True), + ("TRUE", True), + ("1", True), + ("yes", True), + ("false", False), + ("False", False), + ("FALSE", False), + ("0", False), + ("no", False), + ("", False), + ("random", False), + ] + + for value, expected in test_cases: + os.environ["CUA_USE_HOST_COMPUTER_SERVER"] = value + + # Check parsing logic + use_host = os.getenv("CUA_USE_HOST_COMPUTER_SERVER", "false").lower() in ( + "true", + "1", + "yes", + ) + + assert ( + use_host == expected + ), f"Failed for value '{value}': expected {expected}, got {use_host}" + print(f"βœ“ Env var '{value}' correctly parsed as {expected}") + + os.environ.pop("CUA_USE_HOST_COMPUTER_SERVER", None) + + +if __name__ == "__main__": + print("Testing MCP Server Local Desktop Option") + print("=" * 60) + + print("\n1. Testing environment variable parsing...") + asyncio.run(test_env_var_parsing()) + + print("\n2. Testing default VM mode...") + try: + asyncio.run(test_default_vm_mode()) + except Exception as e: + print(f"βœ— Default VM mode test failed: {e}") + print("Note: This may require lume/VM setup to fully test") + + print("\n3. 
Testing local desktop mode...") + try: + asyncio.run(test_local_desktop_mode()) + except Exception as e: + print(f"βœ— Local desktop mode test failed: {e}") + print("Note: This may require computer-server to be running locally") + + print("\n" + "=" * 60) + print("Tests completed!") diff --git a/libs/typescript/agent/README.md b/libs/typescript/agent/README.md index 27c152fb..42cb4184 100644 --- a/libs/typescript/agent/README.md +++ b/libs/typescript/agent/README.md @@ -32,7 +32,7 @@ const peerClient = new AgentClient('peer://my-agent-proxy'); // Send a simple text request const response = await client.responses.create({ - model: 'anthropic/claude-3-5-sonnet-20241022', + model: 'anthropic/claude-sonnet-4-5-20250929', input: 'Write a one-sentence bedtime story about a unicorn.', // Optional per-request env overrides env: { @@ -47,7 +47,7 @@ console.log(response.output); ```typescript const response = await client.responses.create({ - model: 'anthropic/claude-3-5-sonnet-20241022', + model: 'anthropic/claude-sonnet-4-5-20250929', input: [ { role: 'user', @@ -74,7 +74,7 @@ const client = new AgentClient('https://localhost:8000', { }); const response = await client.responses.create({ - model: 'anthropic/claude-3-5-sonnet-20241022', + model: 'anthropic/claude-sonnet-4-5-20250929', input: 'Hello, world!', agent_kwargs: { save_trajectory: true, diff --git a/libs/typescript/agent/examples/README.md b/libs/typescript/agent/examples/README.md index d27eac59..68419600 100644 --- a/libs/typescript/agent/examples/README.md +++ b/libs/typescript/agent/examples/README.md @@ -42,7 +42,7 @@ A simple HTML page that demonstrates using the CUA Agent Client in a browser env 4. **Configure and test:** - Enter an agent URL (e.g., `https://localhost:8000` or `peer://some-peer-id`) - - Enter a model name (e.g., `anthropic/claude-3-5-sonnet-20241022`) + - Enter a model name (e.g., `anthropic/claude-sonnet-4-5-20250929`) - Type a message and click "Send Message" or press Enter - View the response in the output textarea @@ -53,7 +53,7 @@ A simple HTML page that demonstrates using the CUA Agent Client in a browser env **Example Models:** -- `anthropic/claude-3-5-sonnet-20241022` +- `anthropic/claude-sonnet-4-5-20250929` - `openai/gpt-4` - `huggingface-local/microsoft/UI-TARS-7B` diff --git a/libs/typescript/core/src/telemetry/clients/posthog.ts b/libs/typescript/core/src/telemetry/clients/posthog.ts index 7b14d162..260a1873 100644 --- a/libs/typescript/core/src/telemetry/clients/posthog.ts +++ b/libs/typescript/core/src/telemetry/clients/posthog.ts @@ -44,14 +44,12 @@ export class PostHogTelemetryClient { sampleRate: TELEMETRY_SAMPLE_RATE, posthog: { apiKey: PUBLIC_POSTHOG_API_KEY, host: PUBLIC_POSTHOG_HOST }, }; - // Check for multiple environment variables that can disable telemetry: - // CUA_TELEMETRY=off to disable telemetry (legacy way) - // CUA_TELEMETRY_DISABLED=1 to disable telemetry (new, more explicit way) - const telemetryDisabled = - process.env.CUA_TELEMETRY?.toLowerCase() === 'off' || - ['1', 'true', 'yes', 'on'].includes(process.env.CUA_TELEMETRY_DISABLED?.toLowerCase() || ''); + // Check CUA_TELEMETRY_ENABLED environment variable (defaults to enabled) + const telemetryEnabled = ['1', 'true', 'yes', 'on'].includes( + process.env.CUA_TELEMETRY_ENABLED?.toLowerCase() || 'true' + ); - this.config.enabled = !telemetryDisabled; + this.config.enabled = telemetryEnabled; this.config.sampleRate = Number.parseFloat( process.env.CUA_TELEMETRY_SAMPLE_RATE || String(TELEMETRY_SAMPLE_RATE) ); diff --git 
a/libs/typescript/core/tests/telemetry.test.ts b/libs/typescript/core/tests/telemetry.test.ts index 4c4d47f6..7cfbec4e 100644 --- a/libs/typescript/core/tests/telemetry.test.ts +++ b/libs/typescript/core/tests/telemetry.test.ts @@ -4,27 +4,38 @@ import { Telemetry } from '../src/'; describe('Telemetry', () => { let telemetry: Telemetry; beforeEach(() => { - process.env.CUA_TELEMETRY = ''; - process.env.CUA_TELEMETRY_DISABLED = ''; + process.env.CUA_TELEMETRY_ENABLED = ''; telemetry = new Telemetry(); }); describe('telemetry.enabled', () => { - it('should return false when CUA_TELEMETRY is off', () => { - process.env.CUA_TELEMETRY = 'off'; + it('should return false when CUA_TELEMETRY_ENABLED is false', () => { + process.env.CUA_TELEMETRY_ENABLED = 'false'; telemetry = new Telemetry(); expect(telemetry.enabled).toBe(false); }); - it('should return true when CUA_TELEMETRY is not set', () => { - process.env.CUA_TELEMETRY = ''; + it('should return false when CUA_TELEMETRY_ENABLED is 0', () => { + process.env.CUA_TELEMETRY_ENABLED = '0'; + telemetry = new Telemetry(); + expect(telemetry.enabled).toBe(false); + }); + + it('should return true when CUA_TELEMETRY_ENABLED is not set (default)', () => { + delete process.env.CUA_TELEMETRY_ENABLED; telemetry = new Telemetry(); expect(telemetry.enabled).toBe(true); }); - it('should return false if CUA_TELEMETRY_DISABLED is 1', () => { - process.env.CUA_TELEMETRY_DISABLED = '1'; + it('should return true when CUA_TELEMETRY_ENABLED is true', () => { + process.env.CUA_TELEMETRY_ENABLED = 'true'; telemetry = new Telemetry(); - expect(telemetry.enabled).toBe(false); + expect(telemetry.enabled).toBe(true); + }); + + it('should return true when CUA_TELEMETRY_ENABLED is 1', () => { + process.env.CUA_TELEMETRY_ENABLED = '1'; + telemetry = new Telemetry(); + expect(telemetry.enabled).toBe(true); }); }); }); diff --git a/libs/typescript/cua-cli/.gitignore b/libs/typescript/cua-cli/.gitignore new file mode 100644 index 00000000..a14702c4 --- /dev/null +++ b/libs/typescript/cua-cli/.gitignore @@ -0,0 +1,34 @@ +# dependencies (bun install) +node_modules + +# output +out +dist +*.tgz + +# code coverage +coverage +*.lcov + +# logs +logs +_.log +report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json + +# dotenv environment variable files +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# caches +.eslintcache +.cache +*.tsbuildinfo + +# IntelliJ based IDEs +.idea + +# Finder (MacOS) folder config +.DS_Store diff --git a/libs/typescript/cua-cli/.prettierrc b/libs/typescript/cua-cli/.prettierrc new file mode 100644 index 00000000..123d3e80 --- /dev/null +++ b/libs/typescript/cua-cli/.prettierrc @@ -0,0 +1,7 @@ +{ + "semi": true, + "trailingComma": "es5", + "singleQuote": true, + "tabWidth": 2, + "useTabs": false +} diff --git a/libs/typescript/cua-cli/CLAUDE.md b/libs/typescript/cua-cli/CLAUDE.md new file mode 100644 index 00000000..5fa3f4d5 --- /dev/null +++ b/libs/typescript/cua-cli/CLAUDE.md @@ -0,0 +1,105 @@ +Default to using Bun instead of Node.js. 
+
+- Use `bun <file>` instead of `node <file>` or `ts-node <file>`
+- Use `bun test` instead of `jest` or `vitest`
+- Use `bun build <file>` instead of `webpack` or `esbuild`
+- Use `bun install` instead of `npm install` or `yarn install` or `pnpm install`
+- Use `bun run <script>` instead of `npm run <script>` or `yarn run <script>` or `pnpm run <script>`
+
+With the following `frontend.tsx`:
+
+```tsx#frontend.tsx
+import React from "react";
+
+// import .css files directly and it works
+import './index.css';
+
+import { createRoot } from "react-dom/client";
+
+const root = createRoot(document.body);
+
+export default function Frontend() {
+  return (
+    <div>
+      Hello, world!
+    </div>
+  )
    ; +} + +root.render(); +``` + +Then, run index.ts + +```sh +bun --hot ./index.ts +``` + +For more information, read the Bun API docs in `node_modules/bun-types/docs/**.md`. diff --git a/libs/typescript/cua-cli/README.md b/libs/typescript/cua-cli/README.md new file mode 100644 index 00000000..6edf5c5d --- /dev/null +++ b/libs/typescript/cua-cli/README.md @@ -0,0 +1,97 @@ +# CUA CLI (Bun) + +## Install + +```bash +bun install +bun link # register package globally +bun link cua-cli # install the global binary `cua` +``` + +If you want to run without linking: + +```bash +bun run ./index.ts -- --help +``` + +## Commands + +- **Auth** + + The CLI supports both **flat** and **grouped** command styles: + + ```bash + # Grouped style (explicit) + cua auth login + cua auth env + cua auth logout + + # Flat style (quick) + cua login + cua env + cua logout + ``` + + **Available Commands:** + - `login` – opens browser to authorize; stores API key locally + - `--api-key sk-...` – stores provided key directly + - `env` – writes/updates `.env` with `CUA_API_KEY` + - `logout` – clears stored API key + +- **Sandboxes** + + The CLI supports both **flat** and **grouped** command styles: + + ```bash + # Flat style (quick & concise) + cua list + cua create --os linux --size small --region north-america + cua start + cua stop + + # Grouped style (explicit & clear) + cua sb list # or: cua sandbox list + cua sb create # or: cua sandbox create + cua sb start # or: cua sandbox start + cua sb stop # or: cua sandbox stop + ``` + + **Available Commands:** + - `list` (aliases: `ls`, `ps`) – list all sandboxes + - `create` – create a new sandbox + - `--os`: `linux`, `windows`, `macos` + - `--size`: `small`, `medium`, `large` + - `--region`: `north-america`, `europe`, `asia-pacific`, `south-america` + - `delete ` – delete a sandbox + - `start ` – start a stopped sandbox + - `stop ` – stop a running sandbox + - `restart ` – restart a sandbox + - `vnc ` (alias: `open`) – open VNC desktop in your browser + +## Auth Flow (Dynamic Callback Port) + +- CLI starts a small local HTTP server using `Bun.serve({ port: 0 })` which picks an available port. +- Browser is opened to `https://cua.ai/cli-auth?callback_url=http://127.0.0.1:/callback`. +- After you click "Authorize CLI", the browser redirects to the local server with `?token=...`. +- The CLI saves the API key in `~/.config/cua/cli.sqlite`. + +> Note: If the browser cannot be opened automatically, copy/paste the printed URL. + +## Project Structure + +- `index.ts` – entry point (shebang + start CLI) +- `src/cli.ts` – yargs bootstrapping +- `src/commands/auth.ts` – auth/login/pull/logout commands +- `src/commands/sandbox.ts` – sandbox list/start/stop/restart commands +- `src/auth.ts` – browser flow + local callback server (dynamic port) +- `src/http.ts` – HTTP helper +- `src/storage.ts` – SQLite-backed key-value storage +- `src/config.ts` – constants and paths +- `src/util.ts` – table printing, .env writer + +## Notes + +- Stored API key lives at `~/.config/cua/cli.sqlite` under `kv(api_key)`. +- Public API base defaults to `https://api.cua.ai`. +- Website base defaults to `https://cua.ai`. +- Authorization header: `Authorization: Bearer `. 
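
The same bearer-token convention works for calling the API directly, outside the CLI. The snippet below is an illustrative sketch only (it is not part of the shipped CLI): it assumes `CUA_API_KEY` is available in the environment (for example via the `.env` file that `cua env` writes) and calls the `/v1/vms` listing endpoint that `cua list` uses under the hood.

```ts
// sketch.ts — minimal direct API call, mirroring what `cua list` does.
// Assumes CUA_API_KEY is set; CUA_API_BASE can override the default API base.
const API_BASE =
  process.env.CUA_API_BASE?.replace(/\/$/, '') ?? 'https://api.cua.ai';
const apiKey = process.env.CUA_API_KEY;

if (!apiKey) {
  console.error('CUA_API_KEY is not set (run `cua env` or `cua login` first)');
  process.exit(1);
}

const res = await fetch(`${API_BASE}/v1/vms`, {
  headers: { Authorization: `Bearer ${apiKey}` },
});

if (!res.ok) {
  console.error(`Request failed: ${res.status}`);
  process.exit(1);
}

// Shape follows the fields the CLI prints: name, status, optional host.
const sandboxes = (await res.json()) as Array<{
  name: string;
  status: string;
  host?: string;
}>;

for (const sb of sandboxes) {
  console.log(`${sb.name}\t${sb.status}\t${sb.host ?? ''}`);
}
```

Run it with `bun sketch.ts`; Bun supports top-level `await`, so no wrapper function is needed.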
diff --git a/libs/typescript/cua-cli/bun.lock b/libs/typescript/cua-cli/bun.lock new file mode 100644 index 00000000..b91c382e --- /dev/null +++ b/libs/typescript/cua-cli/bun.lock @@ -0,0 +1,128 @@ +{ + "lockfileVersion": 1, + "configVersion": 0, + "workspaces": { + "": { + "name": "cua-cli", + "dependencies": { + "yargs": "^18.0.0", + }, + "devDependencies": { + "@types/bun": "latest", + "@types/yargs": "^17.0.33", + "bumpp": "^10.1.0", + "prettier": "^3.0.0", + "typescript": "^5", + }, + }, + }, + "packages": { + "@types/bun": ["@types/bun@1.3.0", "", { "dependencies": { "bun-types": "1.3.0" } }, "sha512-+lAGCYjXjip2qY375xX/scJeVRmZ5cY0wyHYyCYxNcdEXrQ4AOe3gACgd4iQ8ksOslJtW4VNxBJ8llUwc3a6AA=="], + + "@types/node": ["@types/node@24.9.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-MKNwXh3seSK8WurXF7erHPJ2AONmMwkI7zAMrXZDPIru8jRqkk6rGDBVbw4mLwfqA+ZZliiDPg05JQ3uW66tKQ=="], + + "@types/react": ["@types/react@19.2.2", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-6mDvHUFSjyT2B2yeNx2nUgMxh9LtOWvkhIU3uePn2I2oyNymUAX1NIsdgviM4CH+JSrp2D2hsMvJOkxY+0wNRA=="], + + "@types/yargs": ["@types/yargs@17.0.33", "", { "dependencies": { "@types/yargs-parser": "*" } }, "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA=="], + + "@types/yargs-parser": ["@types/yargs-parser@21.0.3", "", {}, "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ=="], + + "ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "ansis": ["ansis@4.2.0", "", {}, "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig=="], + + "args-tokenizer": ["args-tokenizer@0.3.0", "", {}, "sha512-xXAd7G2Mll5W8uo37GETpQ2VrE84M181Z7ugHFGQnJZ50M2mbOv0osSZ9VsSgPfJQ+LVG0prSi0th+ELMsno7Q=="], + + "bumpp": ["bumpp@10.3.1", "", { "dependencies": { "ansis": "^4.2.0", "args-tokenizer": "^0.3.0", "c12": "^3.3.0", "cac": "^6.7.14", "escalade": "^3.2.0", "jsonc-parser": "^3.3.1", "package-manager-detector": "^1.3.0", "semver": "^7.7.2", "tinyexec": "^1.0.1", "tinyglobby": "^0.2.15", "yaml": "^2.8.1" }, "bin": { "bumpp": "bin/bumpp.mjs" } }, "sha512-cOKPRFCWvHcYPJQAHN6V7Jp/wAfnyqQRXQ+2fgWIL6Gao20rpu7xQ1cGGo1APOfmbQmmHngEPg9Fy7nJ3giRkQ=="], + + "bun-types": ["bun-types@1.3.0", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-u8X0thhx+yJ0KmkxuEo9HAtdfgCBaM/aI9K90VQcQioAmkVp3SG3FkwWGibUFz3WdXAdcsqOcbU40lK7tbHdkQ=="], + + "c12": ["c12@3.3.2", "", { "dependencies": { "chokidar": "^4.0.3", "confbox": "^0.2.2", "defu": "^6.1.4", "dotenv": "^17.2.3", "exsolve": "^1.0.8", "giget": "^2.0.0", "jiti": "^2.6.1", "ohash": "^2.0.11", "pathe": "^2.0.3", "perfect-debounce": "^2.0.0", "pkg-types": "^2.3.0", "rc9": "^2.1.2" }, "peerDependencies": { "magicast": "*" }, "optionalPeers": ["magicast"] }, "sha512-QkikB2X5voO1okL3QsES0N690Sn/K9WokXqUsDQsWy5SnYb+psYQFGA10iy1bZHj3fjISKsI67Q90gruvWWM3A=="], + + "cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="], + + "chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], + + "citty": ["citty@0.1.6", 
"", { "dependencies": { "consola": "^3.2.3" } }, "sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ=="], + + "cliui": ["cliui@9.0.1", "", { "dependencies": { "string-width": "^7.2.0", "strip-ansi": "^7.1.0", "wrap-ansi": "^9.0.0" } }, "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w=="], + + "confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + + "consola": ["consola@3.4.2", "", {}, "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA=="], + + "csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="], + + "defu": ["defu@6.1.4", "", {}, "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg=="], + + "destr": ["destr@2.0.5", "", {}, "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA=="], + + "dotenv": ["dotenv@17.2.3", "", {}, "sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w=="], + + "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], + + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + + "exsolve": ["exsolve@1.0.8", "", {}, "sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA=="], + + "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], + + "get-caller-file": ["get-caller-file@2.0.5", "", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], + + "get-east-asian-width": ["get-east-asian-width@1.4.0", "", {}, "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q=="], + + "giget": ["giget@2.0.0", "", { "dependencies": { "citty": "^0.1.6", "consola": "^3.4.0", "defu": "^6.1.4", "node-fetch-native": "^1.6.6", "nypm": "^0.6.0", "pathe": "^2.0.3" }, "bin": { "giget": "dist/cli.mjs" } }, "sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA=="], + + "jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="], + + "jsonc-parser": ["jsonc-parser@3.3.1", "", {}, "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ=="], + + "node-fetch-native": ["node-fetch-native@1.6.7", "", {}, "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q=="], + + "nypm": ["nypm@0.6.2", "", { "dependencies": { "citty": "^0.1.6", "consola": "^3.4.2", "pathe": "^2.0.3", "pkg-types": "^2.3.0", "tinyexec": "^1.0.1" }, "bin": { "nypm": "dist/cli.mjs" } }, "sha512-7eM+hpOtrKrBDCh7Ypu2lJ9Z7PNZBdi/8AT3AX8xoCj43BBVHD0hPSTEvMtkMpfs8FCqBGhxB+uToIQimA111g=="], + + "ohash": ["ohash@2.0.11", "", {}, "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ=="], + + "package-manager-detector": ["package-manager-detector@1.5.0", "", {}, 
"sha512-uBj69dVlYe/+wxj8JOpr97XfsxH/eumMt6HqjNTmJDf/6NO9s+0uxeOneIz3AsPt2m6y9PqzDzd3ATcU17MNfw=="], + + "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], + + "perfect-debounce": ["perfect-debounce@2.0.0", "", {}, "sha512-fkEH/OBiKrqqI/yIgjR92lMfs2K8105zt/VT6+7eTjNwisrsh47CeIED9z58zI7DfKdH3uHAn25ziRZn3kgAow=="], + + "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "pkg-types": ["pkg-types@2.3.0", "", { "dependencies": { "confbox": "^0.2.2", "exsolve": "^1.0.7", "pathe": "^2.0.3" } }, "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig=="], + + "prettier": ["prettier@3.6.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ=="], + + "rc9": ["rc9@2.1.2", "", { "dependencies": { "defu": "^6.1.4", "destr": "^2.0.3" } }, "sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg=="], + + "readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="], + + "semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + + "string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], + + "strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], + + "tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="], + + "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], + + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], + + "wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="], + + "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], + + "yaml": ["yaml@2.8.1", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw=="], + + "yargs": ["yargs@18.0.0", "", { "dependencies": { "cliui": "^9.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "string-width": "^7.2.0", "y18n": "^5.0.5", "yargs-parser": "^22.0.0" } }, "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg=="], + + "yargs-parser": ["yargs-parser@22.0.0", "", {}, 
"sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw=="], + } +} diff --git a/libs/typescript/cua-cli/index.ts b/libs/typescript/cua-cli/index.ts new file mode 100755 index 00000000..ca291983 --- /dev/null +++ b/libs/typescript/cua-cli/index.ts @@ -0,0 +1,7 @@ +#! /usr/bin/env bun +import { runCli } from './src/cli'; + +runCli().catch((err) => { + console.error(err); + process.exit(1); +}); diff --git a/libs/typescript/cua-cli/package.json b/libs/typescript/cua-cli/package.json new file mode 100644 index 00000000..5a79ef77 --- /dev/null +++ b/libs/typescript/cua-cli/package.json @@ -0,0 +1,45 @@ +{ + "name": "@trycua/cli", + "version": "0.1.5", + "packageManager": "bun@1.1.38", + "description": "Command-line interface for CUA cloud sandboxes and authentication", + "type": "module", + "license": "MIT", + "homepage": "https://github.com/trycua/cua/tree/main/libs/typescript/cua-cli", + "bugs": { + "url": "https://github.com/trycua/cua/issues" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/trycua/cua.git" + }, + "author": "cua", + "files": [ + "index.ts", + "src", + "README.md" + ], + "bin": { + "cua": "./index.ts" + }, + "publishConfig": { + "access": "public" + }, + "scripts": { + "lint": "prettier --check .", + "lint:fix": "prettier --write .", + "typecheck": "tsc --noEmit", + "release": "bumpp && bun publish", + "prepublishOnly": "bun run typecheck" + }, + "dependencies": { + "yargs": "^18.0.0" + }, + "devDependencies": { + "@types/bun": "latest", + "@types/yargs": "^17.0.33", + "bumpp": "^10.1.0", + "prettier": "^3.0.0", + "typescript": "^5" + } +} diff --git a/libs/typescript/cua-cli/src/auth.ts b/libs/typescript/cua-cli/src/auth.ts new file mode 100644 index 00000000..e818ba80 --- /dev/null +++ b/libs/typescript/cua-cli/src/auth.ts @@ -0,0 +1,75 @@ +import { AUTH_PAGE, CALLBACK_HOST } from './config'; +import { setApiKey, getApiKey } from './storage'; +import { openInBrowser } from './util'; + +const c = { + reset: '\x1b[0m', + bold: '\x1b[1m', + dim: '\x1b[2m', + underline: '\x1b[4m', + cyan: '\x1b[36m', + green: '\x1b[32m', + yellow: '\x1b[33m', +}; + +export async function loginViaBrowser(): Promise { + let resolveToken!: (v: string) => void; + const tokenPromise = new Promise((resolve) => { + resolveToken = resolve; + }); + + // dynamic port (0) -> OS chooses available port + const server = Bun.serve({ + hostname: CALLBACK_HOST, + port: 0, + fetch(req) { + const u = new URL(req.url); + if (u.pathname !== '/callback') + return new Response('Not found', { status: 404 }); + const token = u.searchParams.get('token'); + if (!token) return new Response('Missing token', { status: 400 }); + resolveToken(token); + queueMicrotask(() => server.stop()); + return new Response('CLI authorized. 
You can close this window.', { + status: 200, + headers: { 'content-type': 'text/plain' }, + }); + }, + }); + + const callbackURL = `http://${CALLBACK_HOST}:${server.port}/callback`; + const url = `${AUTH_PAGE}?callback_url=${encodeURIComponent(callbackURL)}`; + console.log( + `${c.cyan}${c.bold}Opening your default browser to authorize the CLI...${c.reset}` + ); + console.log( + `${c.dim}If the browser does not open automatically, copy/paste this URL:${c.reset}` + ); + console.log(`${c.yellow}${c.underline}${url}${c.reset}`); + await openInBrowser(url); + + let timeoutId: ReturnType | undefined; + const timeout = new Promise((_, reject) => { + timeoutId = setTimeout( + () => reject(new Error('Timed out waiting for authorization')), + 2 * 60 * 1000 + ); + }); + try { + const result = await Promise.race([tokenPromise, timeout]); + if (timeoutId) clearTimeout(timeoutId); + return result; + } finally { + try { + server.stop(); + } catch {} + } +} + +export async function ensureApiKeyInteractive(): Promise { + const existing = getApiKey(); + if (existing) return existing; + const token = await loginViaBrowser(); + setApiKey(token); + return token; +} diff --git a/libs/typescript/cua-cli/src/cli.ts b/libs/typescript/cua-cli/src/cli.ts new file mode 100644 index 00000000..7ee7d080 --- /dev/null +++ b/libs/typescript/cua-cli/src/cli.ts @@ -0,0 +1,57 @@ +import yargs from 'yargs'; +import { hideBin } from 'yargs/helpers'; +import { registerAuthCommands } from './commands/auth'; +import { registerSandboxCommands } from './commands/sandbox'; + +export async function runCli() { + let argv = yargs(hideBin(process.argv)) + .scriptName('cua') + .usage('Usage: $0 [options]') + .epilogue( + 'Recommended Command Structure:\n' + + ' cua auth Authenticate and manage credentials\n' + + ' login Login via browser or with API key\n' + + ' env Export API key to .env file\n' + + ' logout Clear stored credentials\n' + + '\n' + + ' cua sb Create and manage cloud sandboxes\n' + + ' list View all your sandboxes\n' + + ' create Provision a new sandbox\n' + + ' start Start or resume a sandbox\n' + + ' stop Stop a sandbox (preserves disk)\n' + + ' suspend Suspend a sandbox (preserves memory)\n' + + ' vnc Open remote desktop\n' + + '\n' + + 'Documentation: https://docs.cua.ai/libraries/cua-cli/commands' + ); + // Override the default --version behavior + argv = argv.version(false).option('version', { + alias: 'v', + describe: 'Show CUA CLI version', + type: 'boolean', + global: false, + }); + argv = registerAuthCommands(argv); + argv = registerSandboxCommands(argv); + + // Check for version flag before command validation + const args = process.argv.slice(2); + if (args.includes('--version') || args.includes('-v')) { + try { + const home = process.env.HOME || process.env.USERPROFILE || ''; + const path = `${home}/.cua/bin/.version`; + const version = await Bun.file(path).text(); + const v = version.trim(); + if (v) { + console.log(v); + } else { + console.log('unknown'); + } + } catch { + console.log('unknown'); + } + process.exit(0); + } + + await argv.demandCommand(1).strict().help().parseAsync(); +} diff --git a/libs/typescript/cua-cli/src/commands/auth.ts b/libs/typescript/cua-cli/src/commands/auth.ts new file mode 100644 index 00000000..b7fc6eaf --- /dev/null +++ b/libs/typescript/cua-cli/src/commands/auth.ts @@ -0,0 +1,89 @@ +import { setApiKey, clearApiKey } from '../storage'; +import { ensureApiKeyInteractive, loginViaBrowser } from '../auth'; +import { writeEnvFile } from '../util'; +import type { Argv } from 
'yargs'; + +// Command handlers +const loginHandler = async (argv: Record) => { + if (argv['api-key']) { + setApiKey(String(argv['api-key'])); + console.log('API key saved'); + return; + } + console.log('Opening browser for CLI auth...'); + const token = await loginViaBrowser(); + setApiKey(token); + console.log('API key saved'); +}; + +const envHandler = async (_argv: Record) => { + const token = await ensureApiKeyInteractive(); + const out = await writeEnvFile(process.cwd(), token); + console.log(`Wrote ${out}`); +}; + +const logoutHandler = async (_argv: Record) => { + clearApiKey(); + console.log('Logged out'); +}; + +export function registerAuthCommands(y: Argv) { + // Grouped structure: cua auth (register first to appear first in help) + y.command( + 'auth', + 'Authenticate with CUA (login, logout, or export credentials)', + (y) => { + return y + .command( + 'login', + 'Authenticate via browser or API key and save credentials locally', + (y) => + y.option('api-key', { + type: 'string', + describe: 'API key to store directly', + }), + loginHandler + ) + .command( + 'env', + 'Export your API key to a .env file in the current directory', + () => {}, + envHandler + ) + .command( + 'logout', + 'Clear stored API credentials from this machine', + () => {}, + logoutHandler + ) + .demandCommand(1, 'You must provide an auth command'); + }, + () => {} + ); + + // Flat structure (backwards compatible, hidden from help) + y.command({ + command: 'login', + describe: false as any, // Hide from help + builder: (y: Argv) => + y.option('api-key', { + type: 'string', + describe: 'API key to store directly', + }), + handler: loginHandler, + } as any) + .command({ + command: 'env', + describe: false as any, // Hide from help + builder: (y: Argv) => y, + handler: envHandler, + } as any) + .command({ + command: 'logout', + describe: false as any, // Hide from help + builder: (y: Argv) => y, + handler: logoutHandler, + } as any); + + return y; +} diff --git a/libs/typescript/cua-cli/src/commands/sandbox.ts b/libs/typescript/cua-cli/src/commands/sandbox.ts new file mode 100644 index 00000000..5d2bde93 --- /dev/null +++ b/libs/typescript/cua-cli/src/commands/sandbox.ts @@ -0,0 +1,439 @@ +import type { Argv } from 'yargs'; +import { ensureApiKeyInteractive } from '../auth'; +import { WEBSITE_URL } from '../config'; +import { http } from '../http'; +import { clearApiKey } from '../storage'; +import type { SandboxItem } from '../util'; +import { openInBrowser, printSandboxList } from '../util'; + +// Command handlers +const listHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const res = await http('/v1/vms', { token }); + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + if (!res.ok) { + console.error(`Request failed: ${res.status}`); + process.exit(1); + } + const data = (await res.json()) as SandboxItem[]; + printSandboxList(data, Boolean(argv['show-passwords'])); +}; + +const createHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const { os, size, region } = argv as { + os: string; + size: string; + region: string; + }; + + const res = await http('/v1/vms', { + token, + method: 'POST', + body: { os, configuration: size, region }, + }); + + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. 
Try 'cua login' again."); + process.exit(1); + } + + if (res.status === 400) { + console.error('Invalid request or unsupported configuration'); + process.exit(1); + } + + if (res.status === 500) { + console.error('Internal server error'); + process.exit(1); + } + + if (res.status === 200) { + const data = (await res.json()) as { + status: string; + name: string; + password: string; + host: string; + }; + console.log(`Sandbox created and ready: ${data.name}`); + console.log(`Password: ${data.password}`); + console.log(`Host: ${data.host}`); + return; + } + + if (res.status === 202) { + const data = (await res.json()) as { + status: string; + name: string; + job_id: string; + }; + console.log(`Sandbox provisioning started: ${data.name}`); + console.log(`Job ID: ${data.job_id}`); + console.log("Use 'cua list' to monitor provisioning progress"); + return; + } + + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const deleteHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const res = await http(`/v1/vms/${encodeURIComponent(name)}`, { + token, + method: 'DELETE', + }); + + if (res.status === 202) { + const body = (await res.json().catch(() => ({}))) as { + status?: string; + }; + console.log(`Sandbox deletion initiated: ${body.status ?? 'deleting'}`); + return; + } + + if (res.status === 404) { + console.error('Sandbox not found or not owned by you'); + process.exit(1); + } + + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const startHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const res = await http(`/v1/vms/${encodeURIComponent(name)}/start`, { + token, + method: 'POST', + }); + if (res.status === 204) { + console.log('Start accepted'); + return; + } + if (res.status === 404) { + console.error('Sandbox not found'); + process.exit(1); + } + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const stopHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const res = await http(`/v1/vms/${encodeURIComponent(name)}/stop`, { + token, + method: 'POST', + }); + if (res.status === 202) { + const body = (await res.json().catch(() => ({}))) as { + status?: string; + }; + console.log(body.status ?? 'stopping'); + return; + } + if (res.status === 404) { + console.error('Sandbox not found'); + process.exit(1); + } + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const restartHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const res = await http(`/v1/vms/${encodeURIComponent(name)}/restart`, { + token, + method: 'POST', + }); + if (res.status === 202) { + const body = (await res.json().catch(() => ({}))) as { + status?: string; + }; + console.log(body.status ?? 
'restarting'); + return; + } + if (res.status === 404) { + console.error('Sandbox not found'); + process.exit(1); + } + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const suspendHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const res = await http(`/v1/vms/${encodeURIComponent(name)}/suspend`, { + token, + method: 'POST', + }); + if (res.status === 202) { + const body = (await res.json().catch(() => ({}))) as { + status?: string; + }; + console.log(body.status ?? 'suspending'); + return; + } + if (res.status === 404) { + console.error('Sandbox not found'); + process.exit(1); + } + if (res.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + if (res.status === 400 || res.status === 500) { + const body = (await res.json().catch(() => ({}))) as { error?: string }; + console.error( + body.error ?? + "Suspend not supported for this VM. Use 'cua sb stop' instead." + ); + process.exit(1); + } + console.error(`Unexpected status: ${res.status}`); + process.exit(1); +}; + +const openHandler = async (argv: Record) => { + const token = await ensureApiKeyInteractive(); + const name = String((argv as any).name); + const listRes = await http('/v1/vms', { token }); + if (listRes.status === 401) { + clearApiKey(); + console.error("Unauthorized. Try 'cua login' again."); + process.exit(1); + } + if (!listRes.ok) { + console.error(`Request failed: ${listRes.status}`); + process.exit(1); + } + const sandboxes = (await listRes.json()) as SandboxItem[]; + const sandbox = sandboxes.find((s) => s.name === name); + if (!sandbox) { + console.error('Sandbox not found'); + process.exit(1); + } + const host = + sandbox.host && sandbox.host.length + ? 
sandbox.host + : `${sandbox.name}.sandbox.cua.ai`; + const url = `https://${host}/vnc.html?autoconnect=true&password=${encodeURIComponent(sandbox.password)}`; + console.log(`Opening NoVNC: ${url}`); + await openInBrowser(url); +}; + +// Register commands in both flat and grouped structures +export function registerSandboxCommands(y: Argv) { + // Grouped structure: cua sandbox or cua sb (register first to appear first in help) + y.command( + ['sandbox', 'sb'], + 'Create and manage cloud sandboxes (Linux, Windows, or macOS)', + (y) => { + return y + .command( + ['list', 'ls', 'ps'], + 'List all your sandboxes with status and connection details', + (y) => + y.option('show-passwords', { + type: 'boolean', + default: false, + describe: 'Show sandbox passwords in output', + }), + listHandler + ) + .command( + 'create', + 'Provision a new cloud sandbox in your chosen OS, size, and region', + (y) => + y + .option('os', { + type: 'string', + choices: ['linux', 'windows', 'macos'], + demandOption: true, + describe: 'Operating system', + }) + .option('size', { + type: 'string', + choices: ['small', 'medium', 'large'], + demandOption: true, + describe: 'Sandbox size', + }) + .option('region', { + type: 'string', + choices: [ + 'north-america', + 'europe', + 'asia-pacific', + 'south-america', + ], + demandOption: true, + describe: 'Sandbox region', + }), + createHandler + ) + .command( + 'delete ', + 'Permanently delete a sandbox and all its data', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + deleteHandler + ) + .command( + 'start ', + 'Start a stopped sandbox', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + startHandler + ) + .command( + 'stop ', + 'Stop a running sandbox (data is preserved)', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + stopHandler + ) + .command( + 'restart ', + 'Restart a sandbox (reboot the system)', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + restartHandler + ) + .command( + 'suspend ', + 'Suspend a sandbox, preserving memory state (use start to resume)', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + suspendHandler + ) + .command( + ['vnc ', 'open '], + 'Open remote desktop (VNC) connection in your browser', + (y) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + openHandler + ) + .demandCommand(1, 'You must provide a sandbox command'); + }, + () => {} + ); + + // Flat structure (backwards compatible, hidden from help) + y.command({ + command: ['list', 'ls', 'ps'], + describe: false as any, // Hide from help + builder: (y: Argv) => + y.option('show-passwords', { + type: 'boolean', + default: false, + describe: 'Show sandbox passwords in output', + }), + handler: listHandler, + } as any) + .command({ + command: 'create', + describe: false as any, // Hide from help + builder: (y: Argv) => + y + .option('os', { + type: 'string', + choices: ['linux', 'windows', 'macos'], + demandOption: true, + describe: 'Operating system', + }) + .option('size', { + type: 'string', + choices: ['small', 'medium', 'large'], + demandOption: true, + describe: 'Sandbox size', + }) + .option('region', { + type: 'string', + choices: [ + 'north-america', + 'europe', + 'asia-pacific', + 'south-america', + ], + demandOption: true, + describe: 'Sandbox region', + }), + handler: createHandler, + } as any) + .command({ + command: 'delete ', + describe: false as any, // Hide from help + builder: (y: Argv) => + 
y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: deleteHandler, + } as any) + .command({ + command: 'start ', + describe: false as any, // Hide from help + builder: (y: Argv) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: startHandler, + } as any) + .command({ + command: 'stop ', + describe: false as any, // Hide from help + builder: (y: Argv) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: stopHandler, + } as any) + .command({ + command: 'restart ', + describe: false as any, // Hide from help + builder: (y: Argv) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: restartHandler, + } as any) + .command({ + command: 'suspend ', + describe: false as any, // Hide from help + builder: (y: Argv) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: suspendHandler, + } as any) + .command({ + command: ['vnc ', 'open '], + describe: false as any, // Hide from help + builder: (y: Argv) => + y.positional('name', { type: 'string', describe: 'Sandbox name' }), + handler: openHandler, + } as any); + + return y; +} diff --git a/libs/typescript/cua-cli/src/config.ts b/libs/typescript/cua-cli/src/config.ts new file mode 100644 index 00000000..439bb324 --- /dev/null +++ b/libs/typescript/cua-cli/src/config.ts @@ -0,0 +1,19 @@ +export const WEBSITE_URL = + Bun.env.CUA_WEBSITE_URL?.replace(/\/$/, '') || 'https://cua.ai'; +export const API_BASE = + Bun.env.CUA_API_BASE?.replace(/\/$/, '') || 'https://api.cua.ai'; +export const AUTH_PAGE = `${WEBSITE_URL}/cli-auth`; +export const CALLBACK_HOST = '127.0.0.1'; + +export function getConfigDir(): string { + const home = Bun.env.HOME || Bun.env.USERPROFILE || '.'; + const dir = `${home}/.cua`; + try { + Bun.spawnSync(['mkdir', '-p', dir]); + } catch {} + return dir; +} + +export function getDbPath(): string { + return `${getConfigDir()}/cli.sqlite`; +} diff --git a/libs/typescript/cua-cli/src/http.ts b/libs/typescript/cua-cli/src/http.ts new file mode 100644 index 00000000..4f5f3ac2 --- /dev/null +++ b/libs/typescript/cua-cli/src/http.ts @@ -0,0 +1,17 @@ +import { API_BASE } from './config'; + +export async function http( + path: string, + opts: { method?: string; token: string; body?: any } +): Promise { + const url = `${API_BASE}${path}`; + const headers: Record = { + Authorization: `Bearer ${opts.token}`, + }; + if (opts.body) headers['content-type'] = 'application/json'; + return fetch(url, { + method: opts.method || 'GET', + headers, + body: opts.body ? JSON.stringify(opts.body) : undefined, + }); +} diff --git a/libs/typescript/cua-cli/src/storage.ts b/libs/typescript/cua-cli/src/storage.ts new file mode 100644 index 00000000..5de51cab --- /dev/null +++ b/libs/typescript/cua-cli/src/storage.ts @@ -0,0 +1,44 @@ +import { Database } from 'bun:sqlite'; +import { getDbPath } from './config'; + +function getDb(): Database { + const db = new Database(getDbPath()); + db.exec('PRAGMA journal_mode = WAL;'); + db.exec( + 'CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT NOT NULL);' + ); + return db; +} + +export function setApiKey(token: string) { + const db = getDb(); + try { + const stmt = db.query( + "INSERT INTO kv (k, v) VALUES ('api_key', ?) 
ON CONFLICT(k) DO UPDATE SET v=excluded.v" + ); + stmt.run(token); + } finally { + db.close(); + } +} + +export function getApiKey(): string | null { + const db = getDb(); + try { + const row = db.query("SELECT v FROM kv WHERE k='api_key'").get() as + | { v: string } + | undefined; + return row?.v ?? null; + } finally { + db.close(); + } +} + +export function clearApiKey() { + const db = getDb(); + try { + db.query("DELETE FROM kv WHERE k='api_key'").run(); + } finally { + db.close(); + } +} diff --git a/libs/typescript/cua-cli/src/util.ts b/libs/typescript/cua-cli/src/util.ts new file mode 100644 index 00000000..60147049 --- /dev/null +++ b/libs/typescript/cua-cli/src/util.ts @@ -0,0 +1,79 @@ +export async function writeEnvFile(cwd: string, key: string) { + const path = `${cwd}/.env`; + let content = ''; + try { + content = await Bun.file(path).text(); + } catch {} + const lines = content.split(/\r?\n/).filter(Boolean); + const idx = lines.findIndex((l) => l.startsWith('CUA_API_KEY=')); + if (idx >= 0) lines[idx] = `CUA_API_KEY=${key}`; + else lines.push(`CUA_API_KEY=${key}`); + await Bun.write(path, lines.join('\n') + '\n'); + return path; +} + +export type SandboxStatus = + | 'pending' + | 'running' + | 'stopped' + | 'suspended' + | 'suspending' + | 'terminated' + | 'failed'; +export type SandboxItem = { + name: string; + password: string; + status: SandboxStatus; + host?: string; +}; + +export function printSandboxList( + items: SandboxItem[], + showPasswords: boolean = false +) { + const headers = showPasswords + ? ['NAME', 'STATUS', 'PASSWORD', 'HOST'] + : ['NAME', 'STATUS', 'HOST']; + + const rows: string[][] = [ + headers, + ...items.map((v) => + showPasswords + ? [v.name, String(v.status), v.password, v.host || ''] + : [v.name, String(v.status), v.host || ''] + ), + ]; + + const numCols = headers.length; + const widths: number[] = new Array(numCols).fill(0); + + for (const r of rows) + for (let i = 0; i < numCols; i++) + widths[i] = Math.max(widths[i] ?? 0, (r[i] ?? '').length); + + for (const r of rows) + console.log(r.map((c, i) => (c ?? '').padEnd(widths[i] ?? 0)).join(' ')); + + if (items.length === 0) console.log('No sandboxes found'); +} + +export async function openInBrowser(url: string) { + const platform = process.platform; + let cmd: string; + let args: string[] = []; + if (platform === 'darwin') { + cmd = 'open'; + args = [url]; + } else if (platform === 'win32') { + cmd = 'cmd'; + args = ['/c', 'start', '', url]; + } else { + cmd = 'xdg-open'; + args = [url]; + } + try { + await Bun.spawn({ cmd: [cmd, ...args] }).exited; + } catch { + console.error(`Failed to open browser. 
Please visit: ${url}`); + } +} diff --git a/libs/typescript/cua-cli/tsconfig.json b/libs/typescript/cua-cli/tsconfig.json new file mode 100644 index 00000000..bfa0fead --- /dev/null +++ b/libs/typescript/cua-cli/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + // Environment setup & latest features + "lib": ["ESNext"], + "target": "ESNext", + "module": "Preserve", + "moduleDetection": "force", + "jsx": "react-jsx", + "allowJs": true, + + // Bundler mode + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + + // Best practices + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedIndexedAccess": true, + "noImplicitOverride": true, + + // Some stricter flags (disabled by default) + "noUnusedLocals": false, + "noUnusedParameters": false, + "noPropertyAccessFromIndexSignature": false + } +} diff --git a/libs/typescript/pnpm-lock.yaml b/libs/typescript/pnpm-lock.yaml index ee1d6fb9..99fd2acc 100644 --- a/libs/typescript/pnpm-lock.yaml +++ b/libs/typescript/pnpm-lock.yaml @@ -16,69 +16,69 @@ importers: dependencies: '@trycua/core': specifier: ^0.1.2 - version: 0.1.2 + version: link:../core peerjs: specifier: ^1.5.4 version: 1.5.5 pino: specifier: ^9.7.0 - version: 9.7.0 + version: 9.14.0 devDependencies: '@types/node': specifier: ^22.15.17 - version: 22.15.34 + version: 22.19.1 bumpp: specifier: ^10.1.0 - version: 10.2.0 + version: 10.3.1 happy-dom: specifier: ^17.4.7 version: 17.6.3 tsdown: specifier: ^0.14.1 - version: 0.14.1(typescript@5.8.3) + version: 0.14.2(typescript@5.9.3) typescript: specifier: ^5.7.2 - version: 5.8.3 + version: 5.9.3 vitest: specifier: ^2.1.8 - version: 2.1.9(@types/node@22.15.34)(happy-dom@17.6.3) + version: 2.1.9(@types/node@22.19.1)(happy-dom@17.6.3) computer: dependencies: '@trycua/core': specifier: ^0.1.2 - version: 0.1.2 + version: link:../core pino: specifier: ^9.7.0 - version: 9.7.0 + version: 9.14.0 ws: specifier: ^8.18.0 version: 8.18.3 devDependencies: '@types/node': specifier: ^22.15.17 - version: 22.15.34 + version: 22.19.1 '@types/ws': specifier: ^8.18.1 version: 8.18.1 bumpp: specifier: ^10.1.0 - version: 10.2.0 + version: 10.3.1 happy-dom: specifier: ^17.4.7 version: 17.6.3 tsdown: specifier: ^0.11.9 - version: 0.11.13(typescript@5.8.3) + version: 0.11.13(typescript@5.9.3) tsx: specifier: ^4.19.4 - version: 4.20.3 + version: 4.20.6 typescript: specifier: ^5.8.3 - version: 5.8.3 + version: 5.9.3 vitest: specifier: ^3.1.3 - version: 3.2.4(@types/node@22.15.34)(happy-dom@17.6.3)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) + version: 3.2.4(@types/node@22.19.1)(happy-dom@17.6.3)(tsx@4.20.6) core: dependencies: @@ -87,89 +87,67 @@ importers: version: 10.0.0 pino: specifier: ^9.7.0 - version: 9.7.0 + version: 9.14.0 posthog-node: specifier: ^5.1.1 - version: 5.1.1 + version: 5.13.3 uuid: specifier: ^11.1.0 version: 11.1.0 devDependencies: '@types/node': specifier: ^22.15.17 - version: 22.15.34 + version: 22.19.1 '@types/ws': specifier: ^8.18.1 version: 8.18.1 bumpp: specifier: ^10.1.0 - version: 10.2.0 + version: 10.3.1 happy-dom: specifier: ^17.4.7 version: 17.6.3 tsdown: specifier: ^0.11.9 - version: 0.11.13(typescript@5.8.3) + version: 0.11.13(typescript@5.9.3) tsx: specifier: ^4.19.4 - version: 4.20.3 + version: 4.20.6 typescript: specifier: ^5.8.3 - version: 5.8.3 + version: 5.9.3 vitest: specifier: ^3.1.3 - version: 3.2.4(@types/node@22.15.34)(happy-dom@17.6.3)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) + version: 
3.2.4(@types/node@22.19.1)(happy-dom@17.6.3)(tsx@4.20.6) packages: - '@babel/generator@7.27.5': - resolution: {integrity: sha512-ZGhA37l0e/g2s1Cnzdix0O3aLYm66eF8aufiVteOgnwxgnRP8GoyMj7VWsgWnQbVKXyge7hqrFh2K2TQM6t1Hw==} - engines: {node: '>=6.9.0'} - - '@babel/generator@7.28.3': - resolution: {integrity: sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==} + '@babel/generator@7.28.5': + resolution: {integrity: sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==} engines: {node: '>=6.9.0'} '@babel/helper-string-parser@7.27.1': resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} engines: {node: '>=6.9.0'} - '@babel/helper-validator-identifier@7.27.1': - resolution: {integrity: sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==} + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} engines: {node: '>=6.9.0'} - '@babel/parser@7.27.7': - resolution: {integrity: sha512-qnzXzDXdr/po3bOTbTIQZ7+TxNKxpkN5IifVLXS+r7qwynkZfPyjZfE7hCXbo7IoO9TNcSyibgONsf2HauUd3Q==} + '@babel/parser@7.28.5': + resolution: {integrity: sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==} engines: {node: '>=6.0.0'} hasBin: true - '@babel/parser@7.28.3': - resolution: {integrity: sha512-7+Ey1mAgYqFAx2h0RuoxcQT5+MlG3GTV0TQrgr7/ZliKsm/MNDxVVutlWaziMq7wJNAz8MTqz55XLpWvva6StA==} - engines: {node: '>=6.0.0'} - hasBin: true - - '@babel/types@7.27.7': - resolution: {integrity: sha512-8OLQgDScAOHXnAz2cV+RfzzNMipuLVBz2biuAJFMV9bfkNf393je3VM8CLkjQodW5+iWsSJdSgSWT6rsZoXHPw==} + '@babel/types@7.28.5': + resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} engines: {node: '>=6.9.0'} - '@babel/types@7.28.2': - resolution: {integrity: sha512-ruv7Ae4J5dUYULmeXw1gmb7rYRz57OWCPM57pHojnLq/3Z1CK2lNSLTCVjxVk1F/TZHwOZZrOWi0ur95BbLxNQ==} - engines: {node: '>=6.9.0'} + '@emnapi/core@1.7.1': + resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==} - '@emnapi/core@1.4.3': - resolution: {integrity: sha512-4m62DuCE07lw01soJwPiBGC0nAww0Q+RY70VZ+n49yDIO13yyinhbWCeNnaob0lakDtWQzSdtNWzJeOJt2ma+g==} - - '@emnapi/core@1.5.0': - resolution: {integrity: sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg==} - - '@emnapi/runtime@1.4.3': - resolution: {integrity: sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ==} - - '@emnapi/runtime@1.5.0': - resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==} - - '@emnapi/wasi-threads@1.0.2': - resolution: {integrity: sha512-5n3nTJblwRi8LlXkJ9eBzu+kZR8Yxcc7ubakyQTFzPMtIhFpUBRbsnc2Dv88IZDIbCDlBiWrknhB4Lsz7mg6BA==} + '@emnapi/runtime@1.7.1': + resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==} '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} @@ -180,8 +158,8 @@ packages: cpu: [ppc64] os: [aix] - '@esbuild/aix-ppc64@0.25.5': - resolution: {integrity: 
sha512-9o3TMmpmftaCMepOdA5k/yDw8SfInyzWWTjYTFCX3kPSDJMROQTb8jg+h9Cnwnmm1vOzvxN7gIfB5V2ewpjtGA==} + '@esbuild/aix-ppc64@0.25.12': + resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} engines: {node: '>=18'} cpu: [ppc64] os: [aix] @@ -192,8 +170,8 @@ packages: cpu: [arm64] os: [android] - '@esbuild/android-arm64@0.25.5': - resolution: {integrity: sha512-VGzGhj4lJO+TVGV1v8ntCZWJktV7SGCs3Pn1GRWI1SBFtRALoomm8k5E9Pmwg3HOAal2VDc2F9+PM/rEY6oIDg==} + '@esbuild/android-arm64@0.25.12': + resolution: {integrity: sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} engines: {node: '>=18'} cpu: [arm64] os: [android] @@ -204,8 +182,8 @@ packages: cpu: [arm] os: [android] - '@esbuild/android-arm@0.25.5': - resolution: {integrity: sha512-AdJKSPeEHgi7/ZhuIPtcQKr5RQdo6OO2IL87JkianiMYMPbCtot9fxPbrMiBADOWWm3T2si9stAiVsGbTQFkbA==} + '@esbuild/android-arm@0.25.12': + resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} engines: {node: '>=18'} cpu: [arm] os: [android] @@ -216,8 +194,8 @@ packages: cpu: [x64] os: [android] - '@esbuild/android-x64@0.25.5': - resolution: {integrity: sha512-D2GyJT1kjvO//drbRT3Hib9XPwQeWd9vZoBJn+bu/lVsOZ13cqNdDeqIF/xQ5/VmWvMduP6AmXvylO/PIc2isw==} + '@esbuild/android-x64@0.25.12': + resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} engines: {node: '>=18'} cpu: [x64] os: [android] @@ -228,8 +206,8 @@ packages: cpu: [arm64] os: [darwin] - '@esbuild/darwin-arm64@0.25.5': - resolution: {integrity: sha512-GtaBgammVvdF7aPIgH2jxMDdivezgFu6iKpmT+48+F8Hhg5J/sfnDieg0aeG/jfSvkYQU2/pceFPDKlqZzwnfQ==} + '@esbuild/darwin-arm64@0.25.12': + resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} engines: {node: '>=18'} cpu: [arm64] os: [darwin] @@ -240,8 +218,8 @@ packages: cpu: [x64] os: [darwin] - '@esbuild/darwin-x64@0.25.5': - resolution: {integrity: sha512-1iT4FVL0dJ76/q1wd7XDsXrSW+oLoquptvh4CLR4kITDtqi2e/xwXwdCVH8hVHU43wgJdsq7Gxuzcs6Iq/7bxQ==} + '@esbuild/darwin-x64@0.25.12': + resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} engines: {node: '>=18'} cpu: [x64] os: [darwin] @@ -252,8 +230,8 @@ packages: cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-arm64@0.25.5': - resolution: {integrity: sha512-nk4tGP3JThz4La38Uy/gzyXtpkPW8zSAmoUhK9xKKXdBCzKODMc2adkB2+8om9BDYugz+uGV7sLmpTYzvmz6Sw==} + '@esbuild/freebsd-arm64@0.25.12': + resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] @@ -264,8 +242,8 @@ packages: cpu: [x64] os: [freebsd] - '@esbuild/freebsd-x64@0.25.5': - resolution: {integrity: sha512-PrikaNjiXdR2laW6OIjlbeuCPrPaAl0IwPIaRv+SMV8CiM8i2LqVUHFC1+8eORgWyY7yhQY+2U2fA55mBzReaw==} + '@esbuild/freebsd-x64@0.25.12': + resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] @@ -276,8 +254,8 @@ packages: cpu: [arm64] os: [linux] - '@esbuild/linux-arm64@0.25.5': - resolution: {integrity: sha512-Z9kfb1v6ZlGbWj8EJk9T6czVEjjq2ntSYLY2cw6pAZl4oKtfgQuS4HOq41M/BcoLPzrUbNd+R4BXFyH//nHxVg==} + '@esbuild/linux-arm64@0.25.12': + resolution: {integrity: 
sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} engines: {node: '>=18'} cpu: [arm64] os: [linux] @@ -288,8 +266,8 @@ packages: cpu: [arm] os: [linux] - '@esbuild/linux-arm@0.25.5': - resolution: {integrity: sha512-cPzojwW2okgh7ZlRpcBEtsX7WBuqbLrNXqLU89GxWbNt6uIg78ET82qifUy3W6OVww6ZWobWub5oqZOVtwolfw==} + '@esbuild/linux-arm@0.25.12': + resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} engines: {node: '>=18'} cpu: [arm] os: [linux] @@ -300,8 +278,8 @@ packages: cpu: [ia32] os: [linux] - '@esbuild/linux-ia32@0.25.5': - resolution: {integrity: sha512-sQ7l00M8bSv36GLV95BVAdhJ2QsIbCuCjh/uYrWiMQSUuV+LpXwIqhgJDcvMTj+VsQmqAHL2yYaasENvJ7CDKA==} + '@esbuild/linux-ia32@0.25.12': + resolution: {integrity: sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} engines: {node: '>=18'} cpu: [ia32] os: [linux] @@ -312,8 +290,8 @@ packages: cpu: [loong64] os: [linux] - '@esbuild/linux-loong64@0.25.5': - resolution: {integrity: sha512-0ur7ae16hDUC4OL5iEnDb0tZHDxYmuQyhKhsPBV8f99f6Z9KQM02g33f93rNH5A30agMS46u2HP6qTdEt6Q1kg==} + '@esbuild/linux-loong64@0.25.12': + resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} engines: {node: '>=18'} cpu: [loong64] os: [linux] @@ -324,8 +302,8 @@ packages: cpu: [mips64el] os: [linux] - '@esbuild/linux-mips64el@0.25.5': - resolution: {integrity: sha512-kB/66P1OsHO5zLz0i6X0RxlQ+3cu0mkxS3TKFvkb5lin6uwZ/ttOkP3Z8lfR9mJOBk14ZwZ9182SIIWFGNmqmg==} + '@esbuild/linux-mips64el@0.25.12': + resolution: {integrity: sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] @@ -336,8 +314,8 @@ packages: cpu: [ppc64] os: [linux] - '@esbuild/linux-ppc64@0.25.5': - resolution: {integrity: sha512-UZCmJ7r9X2fe2D6jBmkLBMQetXPXIsZjQJCjgwpVDz+YMcS6oFR27alkgGv3Oqkv07bxdvw7fyB71/olceJhkQ==} + '@esbuild/linux-ppc64@0.25.12': + resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} engines: {node: '>=18'} cpu: [ppc64] os: [linux] @@ -348,8 +326,8 @@ packages: cpu: [riscv64] os: [linux] - '@esbuild/linux-riscv64@0.25.5': - resolution: {integrity: sha512-kTxwu4mLyeOlsVIFPfQo+fQJAV9mh24xL+y+Bm6ej067sYANjyEw1dNHmvoqxJUCMnkBdKpvOn0Ahql6+4VyeA==} + '@esbuild/linux-riscv64@0.25.12': + resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] @@ -360,8 +338,8 @@ packages: cpu: [s390x] os: [linux] - '@esbuild/linux-s390x@0.25.5': - resolution: {integrity: sha512-K2dSKTKfmdh78uJ3NcWFiqyRrimfdinS5ErLSn3vluHNeHVnBAFWC8a4X5N+7FgVE1EjXS1QDZbpqZBjfrqMTQ==} + '@esbuild/linux-s390x@0.25.12': + resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} engines: {node: '>=18'} cpu: [s390x] os: [linux] @@ -372,14 +350,14 @@ packages: cpu: [x64] os: [linux] - '@esbuild/linux-x64@0.25.5': - resolution: {integrity: sha512-uhj8N2obKTE6pSZ+aMUbqq+1nXxNjZIIjCjGLfsWvVpy7gKCOL6rsY1MhRh9zLtUtAI7vpgLMK6DxjO8Qm9lJw==} + '@esbuild/linux-x64@0.25.12': + resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} engines: {node: '>=18'} cpu: [x64] os: [linux] - '@esbuild/netbsd-arm64@0.25.5': - resolution: {integrity: 
sha512-pwHtMP9viAy1oHPvgxtOv+OkduK5ugofNTVDilIzBLpoWAM16r7b/mxBvfpuQDpRQFMfuVr5aLcn4yveGvBZvw==} + '@esbuild/netbsd-arm64@0.25.12': + resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] @@ -390,14 +368,14 @@ packages: cpu: [x64] os: [netbsd] - '@esbuild/netbsd-x64@0.25.5': - resolution: {integrity: sha512-WOb5fKrvVTRMfWFNCroYWWklbnXH0Q5rZppjq0vQIdlsQKuw6mdSihwSo4RV/YdQ5UCKKvBy7/0ZZYLBZKIbwQ==} + '@esbuild/netbsd-x64@0.25.12': + resolution: {integrity: sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] - '@esbuild/openbsd-arm64@0.25.5': - resolution: {integrity: sha512-7A208+uQKgTxHd0G0uqZO8UjK2R0DDb4fDmERtARjSHWxqMTye4Erz4zZafx7Di9Cv+lNHYuncAkiGFySoD+Mw==} + '@esbuild/openbsd-arm64@0.25.12': + resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] @@ -408,20 +386,26 @@ packages: cpu: [x64] os: [openbsd] - '@esbuild/openbsd-x64@0.25.5': - resolution: {integrity: sha512-G4hE405ErTWraiZ8UiSoesH8DaCsMm0Cay4fsFWOOUcz8b8rC6uCvnagr+gnioEjWn0wC+o1/TAHt+It+MpIMg==} + '@esbuild/openbsd-x64@0.25.12': + resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} engines: {node: '>=18'} cpu: [x64] os: [openbsd] + '@esbuild/openharmony-arm64@0.25.12': + resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + '@esbuild/sunos-x64@0.21.5': resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} engines: {node: '>=12'} cpu: [x64] os: [sunos] - '@esbuild/sunos-x64@0.25.5': - resolution: {integrity: sha512-l+azKShMy7FxzY0Rj4RCt5VD/q8mG/e+mDivgspo+yL8zW7qEwctQ6YqKX34DTEleFAvCIUviCFX1SDZRSyMQA==} + '@esbuild/sunos-x64@0.25.12': + resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} engines: {node: '>=18'} cpu: [x64] os: [sunos] @@ -432,8 +416,8 @@ packages: cpu: [arm64] os: [win32] - '@esbuild/win32-arm64@0.25.5': - resolution: {integrity: sha512-O2S7SNZzdcFG7eFKgvwUEZ2VG9D/sn/eIiz8XRZ1Q/DO5a3s76Xv0mdBzVM5j5R639lXQmPmSo0iRpHqUUrsxw==} + '@esbuild/win32-arm64@0.25.12': + resolution: {integrity: sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} engines: {node: '>=18'} cpu: [arm64] os: [win32] @@ -444,8 +428,8 @@ packages: cpu: [ia32] os: [win32] - '@esbuild/win32-ia32@0.25.5': - resolution: {integrity: sha512-onOJ02pqs9h1iMJ1PQphR+VZv8qBMQ77Klcsqv9CNW2w6yLqoURLcgERAIurY6QE63bbLuqgP9ATqajFLK5AMQ==} + '@esbuild/win32-ia32@0.25.12': + resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} engines: {node: '>=18'} cpu: [ia32] os: [win32] @@ -456,15 +440,12 @@ packages: cpu: [x64] os: [win32] - '@esbuild/win32-x64@0.25.5': - resolution: {integrity: sha512-TXv6YnJ8ZMVdX+SXWVBo/0p8LTcrUYngpWjvm91TMjjBQii7Oz11Lw5lbDV5Y0TzuhSJHwiH4hEtC1I42mMS0g==} + '@esbuild/win32-x64@0.25.12': + resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} engines: {node: '>=18'} cpu: [x64] os: [win32] - 
'@jridgewell/gen-mapping@0.3.10': - resolution: {integrity: sha512-HM2F4B9N4cA0RH2KQiIZOHAZqtP4xGS4IZ+SFe1SIbO4dyjf9MTY2Bo3vHYnm0hglWfXqBrzUBSa+cJfl3Xvrg==} - '@jridgewell/gen-mapping@0.3.13': resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} @@ -472,21 +453,18 @@ packages: resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} - '@jridgewell/sourcemap-codec@1.5.2': - resolution: {integrity: sha512-gKYheCylLIedI+CSZoDtGkFV9YEBxRRVcfCH7OfAqh4TyUyRjEE6WVE/aXDXX0p8BIe/QgLcaAoI0220KRRFgg==} + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} - '@jridgewell/trace-mapping@0.3.27': - resolution: {integrity: sha512-VO95AxtSFMelbg3ouljAYnfvTEwSWVt/2YLf+U5Ejd8iT5mXE2Sa/1LGyvySMne2CGsepGLI7KpF3EzE3Aq9Mg==} - - '@jridgewell/trace-mapping@0.3.30': - resolution: {integrity: sha512-GQ7Nw5G2lTu/BtHTKfXhKHok2WGetd4XYcVKGx00SjAk8GMwgJM3zr6zORiPGuOE+/vkc90KtTosSSvaCjKb2Q==} + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} '@msgpack/msgpack@2.8.0': resolution: {integrity: sha512-h9u4u/jiIRKbq25PM+zymTyW6bhTzELvOoUd+AvYriWOAKpLGnIamaET3pnHYoI5iYphAHBI4ayx0MehR+VVPQ==} engines: {node: '>= 10'} - '@napi-rs/wasm-runtime@0.2.11': - resolution: {integrity: sha512-9DPkXtvHydrcOsopiYpUgPHpmj0HWZKMUnL2dZqpvC42lsratuBG06V5ipyno0fUek5VlFsNQ+AcFATSrJXgMA==} + '@napi-rs/wasm-runtime@0.2.12': + resolution: {integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==} '@napi-rs/wasm-runtime@1.0.7': resolution: {integrity: sha512-SeDnOO0Tk7Okiq6DbXmmBODgOAb9dp9gjlphokTUxmt8U3liIP1ZsozBahH69j/RJv+Rfs6IwUKHTgQYJ/HBAw==} @@ -494,21 +472,26 @@ packages: '@oxc-project/types@0.70.0': resolution: {integrity: sha512-ngyLUpUjO3dpqygSRQDx7nMx8+BmXbWOU4oIwTJFV2MVIDG7knIZwgdwXlQWLg3C3oxg1lS7ppMtPKqKFb7wzw==} - '@oxc-project/types@0.95.0': - resolution: {integrity: sha512-vACy7vhpMPhjEJhULNxrdR0D943TkA/MigMpJCHmBHvMXxRStRi/dPtTlfQ3uDwWSzRpT8z+7ImjZVf8JWBocQ==} + '@oxc-project/types@0.98.0': + resolution: {integrity: sha512-Vzmd6FsqVuz5HQVcRC/hrx7Ujo3WEVeQP7C2UNP5uy1hUY4SQvMB+93jxkI1KRHz9a/6cni3glPOtvteN+zpsw==} - '@quansync/fs@0.1.3': - resolution: {integrity: sha512-G0OnZbMWEs5LhDyqy2UL17vGhSVHkQIfVojMtEWVenvj0V5S84VBgy86kJIuNsGDp2p7sTKlpSIpBUWdC35OKg==} - engines: {node: '>=20.0.0'} + '@pinojs/redact@0.4.0': + resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==} - '@rolldown/binding-android-arm64@1.0.0-beta.44': - resolution: {integrity: sha512-g9ejDOehJFhxC1DIXQuZQ9bKv4lRDioOTL42cJjFjqKPl1L7DVb9QQQE1FxokGEIMr6FezLipxwnzOXWe7DNPg==} + '@posthog/core@1.5.5': + resolution: {integrity: sha512-m7G1EQTgo9xrr3lZxCp9C2egP99MSRpIDD95wYzwUPxMesKxI0xEQ+TC5LS/XOXIdmsNvsx4UcxwmzhSwD2GWA==} + + '@quansync/fs@0.1.5': + resolution: {integrity: sha512-lNS9hL2aS2NZgNW7BBj+6EBl4rOf8l+tQ0eRY6JWCI8jI2kc53gSoqbjojU0OnAWhzoXiOjFyGsHcDGePB3lhA==} + + '@rolldown/binding-android-arm64@1.0.0-beta.51': + resolution: {integrity: sha512-Ctn8FUXKWWQI9pWC61P1yumS9WjQtelNS9riHwV7oCkknPGaAry4o7eFx2KgoLMnI2BgFJYpW7Im8/zX3BuONg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@rolldown/binding-darwin-arm64@1.0.0-beta.44': - resolution: {integrity: 
sha512-PxAW1PXLPmCzfhfKIS53kwpjLGTUdIfX4Ht+l9mj05C3lYCGaGowcNsYi2rdxWH24vSTmeK+ajDNRmmmrK0M7g==} + '@rolldown/binding-darwin-arm64@1.0.0-beta.51': + resolution: {integrity: sha512-EL1aRW2Oq15ShUEkBPsDtLMO8GTqfb/ktM/dFaVzXKQiEE96Ss6nexMgfgQrg8dGnNpndFyffVDb5IdSibsu1g==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] @@ -518,8 +501,8 @@ packages: cpu: [arm64] os: [darwin] - '@rolldown/binding-darwin-x64@1.0.0-beta.44': - resolution: {integrity: sha512-/CtQqs1oO9uSb5Ju60rZvsdjE7Pzn8EK2ISAdl2jedjMzeD/4neNyCbwyJOAPzU+GIQTZVyrFZJX+t7HXR1R/g==} + '@rolldown/binding-darwin-x64@1.0.0-beta.51': + resolution: {integrity: sha512-uGtYKlFen9pMIPvkHPWZVDtmYhMQi5g5Ddsndg1gf3atScKYKYgs5aDP4DhHeTwGXQglhfBG7lEaOIZ4UAIWww==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] @@ -529,8 +512,8 @@ packages: cpu: [x64] os: [darwin] - '@rolldown/binding-freebsd-x64@1.0.0-beta.44': - resolution: {integrity: sha512-V5Q5W9c4+2GJ4QabmjmVV6alY97zhC/MZBaLkDtHwGy3qwzbM4DYgXUbun/0a8AH5hGhuU27tUIlYz6ZBlvgOA==} + '@rolldown/binding-freebsd-x64@1.0.0-beta.51': + resolution: {integrity: sha512-JRoVTQtHYbZj1P07JLiuTuXjiBtIa7ag7/qgKA6CIIXnAcdl4LrOf7nfDuHPJcuRKaP5dzecMgY99itvWfmUFQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] @@ -540,8 +523,8 @@ packages: cpu: [x64] os: [freebsd] - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.44': - resolution: {integrity: sha512-X6adjkHeFqKsTU0FXdNN9HY4LDozPqIfHcnXovE5RkYLWIjMWuc489mIZ6iyhrMbCqMUla9IOsh5dvXSGT9o9A==} + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.51': + resolution: {integrity: sha512-BKATVnpPZ0TYBW9XfDwyd4kPGgvf964HiotIwUgpMrFOFYWqpZ+9ONNzMV4UFAYC7Hb5C2qgYQk/qj2OnAd4RQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] @@ -551,8 +534,8 @@ packages: cpu: [arm] os: [linux] - '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.44': - resolution: {integrity: sha512-kRRKGZI4DXWa6ANFr3dLA85aSVkwPdgXaRjfanwY84tfc3LncDiIjyWCb042e3ckPzYhHSZ3LmisO+cdOIYL6Q==} + '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.51': + resolution: {integrity: sha512-xLd7da5jkfbVsBCm1buIRdWtuXY8+hU3+6ESXY/Tk5X5DPHaifrUblhYDgmA34dQt6WyNC2kfXGgrduPEvDI6Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] @@ -562,8 +545,8 @@ packages: cpu: [arm64] os: [linux] - '@rolldown/binding-linux-arm64-musl@1.0.0-beta.44': - resolution: {integrity: sha512-hMtiN9xX1NhxXBa2U3Up4XkVcsVp2h73yYtMDY59z9CDLEZLrik9RVLhBL5QtoX4zZKJ8HZKJtWuGYvtmkCbIQ==} + '@rolldown/binding-linux-arm64-musl@1.0.0-beta.51': + resolution: {integrity: sha512-EQFXTgHxxTzv3t5EmjUP/DfxzFYx9sMndfLsYaAY4DWF6KsK1fXGYsiupif6qPTViPC9eVmRm78q0pZU/kuIPg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] @@ -573,8 +556,8 @@ packages: cpu: [arm64] os: [linux] - '@rolldown/binding-linux-x64-gnu@1.0.0-beta.44': - resolution: {integrity: sha512-rd1LzbpXQuR8MTG43JB9VyXDjG7ogSJbIkBpZEHJ8oMKzL6j47kQT5BpIXrg3b5UVygW9QCI2fpFdMocT5Kudg==} + '@rolldown/binding-linux-x64-gnu@1.0.0-beta.51': + resolution: {integrity: sha512-p5P6Xpa68w3yFaAdSzIZJbj+AfuDnMDqNSeglBXM7UlJT14Q4zwK+rV+8Mhp9MiUb4XFISZtbI/seBprhkQbiQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] @@ -584,8 +567,8 @@ packages: cpu: [x64] os: [linux] - '@rolldown/binding-linux-x64-musl@1.0.0-beta.44': - resolution: {integrity: sha512-qI2IiPqmPRW25exXkuQr3TlweCDc05YvvbSDRPCuPsWkwb70dTiSoXn8iFxT4PWqTi71wWHg1Wyta9PlVhX5VA==} + '@rolldown/binding-linux-x64-musl@1.0.0-beta.51': + resolution: {integrity: sha512-sNVVyLa8HB8wkFipdfz1s6i0YWinwpbMWk5hO5S+XAYH2UH67YzUT13gs6wZTKg2x/3gtgXzYnHyF5wMIqoDAw==} engines: 
{node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] @@ -595,14 +578,14 @@ packages: cpu: [x64] os: [linux] - '@rolldown/binding-openharmony-arm64@1.0.0-beta.44': - resolution: {integrity: sha512-+vHvEc1pL5iJRFlldLC8mjm6P4Qciyfh2bh5ZI6yxDQKbYhCHRKNURaKz1mFcwxhVL5YMYsLyaqM3qizVif9MQ==} + '@rolldown/binding-openharmony-arm64@1.0.0-beta.51': + resolution: {integrity: sha512-e/JMTz9Q8+T3g/deEi8DK44sFWZWGKr9AOCW5e8C8SCVWzAXqYXAG7FXBWBNzWEZK0Rcwo9TQHTQ9Q0gXgdCaA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@rolldown/binding-wasm32-wasi@1.0.0-beta.44': - resolution: {integrity: sha512-XSgLxRrtFj6RpTeMYmmQDAwHjKseYGKUn5LPiIdW4Cq+f5SBSStL2ToBDxkbdxKPEbCZptnLPQ/nfKcAxrC8Xg==} + '@rolldown/binding-wasm32-wasi@1.0.0-beta.51': + resolution: {integrity: sha512-We3LWqSu6J9s5Y0MK+N7fUiiu37aBGPG3Pc347EoaROuAwkCS2u9xJ5dpIyLW4B49CIbS3KaPmn4kTgPb3EyPw==} engines: {node: '>=14.0.0'} cpu: [wasm32] @@ -611,8 +594,8 @@ packages: engines: {node: '>=14.21.3'} cpu: [wasm32] - '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.44': - resolution: {integrity: sha512-cF1LJdDIX02cJrFrX3wwQ6IzFM7I74BYeKFkzdcIA4QZ0+2WA7/NsKIgjvrunupepWb1Y6PFWdRlHSaz5AW1Wg==} + '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.51': + resolution: {integrity: sha512-fj56buHRuMM+r/cb6ZYfNjNvO/0xeFybI6cTkTROJatdP4fvmQ1NS8D/Lm10FCSDEOkqIz8hK3TGpbAThbPHsA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] @@ -622,8 +605,8 @@ packages: cpu: [arm64] os: [win32] - '@rolldown/binding-win32-ia32-msvc@1.0.0-beta.44': - resolution: {integrity: sha512-5uaJonDafhHiMn+iEh7qUp3QQ4Gihv3lEOxKfN8Vwadpy0e+5o28DWI42DpJ9YBYMrVy4JOWJ/3etB/sptpUwA==} + '@rolldown/binding-win32-ia32-msvc@1.0.0-beta.51': + resolution: {integrity: sha512-fkqEqaeEx8AySXiDm54b/RdINb3C0VovzJA3osMhZsbn6FoD73H0AOIiaVAtGr6x63hefruVKTX8irAm4Jkt2w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] @@ -633,8 +616,8 @@ packages: cpu: [ia32] os: [win32] - '@rolldown/binding-win32-x64-msvc@1.0.0-beta.44': - resolution: {integrity: sha512-vsqhWAFJkkmgfBN/lkLCWTXF1PuPhMjfnAyru48KvF7mVh2+K7WkKYHezF3Fjz4X/mPScOcIv+g6cf6wnI6eWg==} + '@rolldown/binding-win32-x64-msvc@1.0.0-beta.51': + resolution: {integrity: sha512-CWuLG/HMtrVcjKGa0C4GnuxONrku89g0+CsH8nT0SNhOtREXuzwgjIXNJImpE/A/DMf9JF+1Xkrq/YRr+F/rCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -644,123 +627,127 @@ packages: cpu: [x64] os: [win32] - '@rolldown/pluginutils@1.0.0-beta.44': - resolution: {integrity: sha512-g6eW7Zwnr2c5RADIoqziHoVs6b3W5QTQ4+qbpfjbkMJ9x+8Og211VW/oot2dj9dVwaK/UyC6Yo+02gV+wWQVNg==} + '@rolldown/pluginutils@1.0.0-beta.51': + resolution: {integrity: sha512-51/8cNXMrqWqX3o8DZidhwz1uYq0BhHDDSfVygAND1Skx5s1TDw3APSSxCMcFFedwgqGcx34gRouwY+m404BBQ==} '@rolldown/pluginutils@1.0.0-beta.9': resolution: {integrity: sha512-e9MeMtVWo186sgvFFJOPGy7/d2j2mZhLJIdVW0C/xDluuOvymEATqz6zKsP0ZmXGzQtqlyjz5sC1sYQUoJG98w==} - '@rollup/rollup-android-arm-eabi@4.44.1': - resolution: {integrity: sha512-JAcBr1+fgqx20m7Fwe1DxPUl/hPkee6jA6Pl7n1v2EFiktAHenTaXl5aIFjUIEsfn9w3HE4gK1lEgNGMzBDs1w==} + '@rollup/rollup-android-arm-eabi@4.53.3': + resolution: {integrity: sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==} cpu: [arm] os: [android] - '@rollup/rollup-android-arm64@4.44.1': - resolution: {integrity: sha512-RurZetXqTu4p+G0ChbnkwBuAtwAbIwJkycw1n6GvlGlBuS4u5qlr5opix8cBAYFJgaY05TWtM+LaoFggUmbZEQ==} + '@rollup/rollup-android-arm64@4.53.3': + resolution: {integrity: 
sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.44.1': - resolution: {integrity: sha512-fM/xPesi7g2M7chk37LOnmnSTHLG/v2ggWqKj3CCA1rMA4mm5KVBT1fNoswbo1JhPuNNZrVwpTvlCVggv8A2zg==} + '@rollup/rollup-darwin-arm64@4.53.3': + resolution: {integrity: sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.44.1': - resolution: {integrity: sha512-gDnWk57urJrkrHQ2WVx9TSVTH7lSlU7E3AFqiko+bgjlh78aJ88/3nycMax52VIVjIm3ObXnDL2H00e/xzoipw==} + '@rollup/rollup-darwin-x64@4.53.3': + resolution: {integrity: sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==} cpu: [x64] os: [darwin] - '@rollup/rollup-freebsd-arm64@4.44.1': - resolution: {integrity: sha512-wnFQmJ/zPThM5zEGcnDcCJeYJgtSLjh1d//WuHzhf6zT3Md1BvvhJnWoy+HECKu2bMxaIcfWiu3bJgx6z4g2XA==} + '@rollup/rollup-freebsd-arm64@4.53.3': + resolution: {integrity: sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==} cpu: [arm64] os: [freebsd] - '@rollup/rollup-freebsd-x64@4.44.1': - resolution: {integrity: sha512-uBmIxoJ4493YATvU2c0upGz87f99e3wop7TJgOA/bXMFd2SvKCI7xkxY/5k50bv7J6dw1SXT4MQBQSLn8Bb/Uw==} + '@rollup/rollup-freebsd-x64@4.53.3': + resolution: {integrity: sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==} cpu: [x64] os: [freebsd] - '@rollup/rollup-linux-arm-gnueabihf@4.44.1': - resolution: {integrity: sha512-n0edDmSHlXFhrlmTK7XBuwKlG5MbS7yleS1cQ9nn4kIeW+dJH+ExqNgQ0RrFRew8Y+0V/x6C5IjsHrJmiHtkxQ==} + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': + resolution: {integrity: sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.44.1': - resolution: {integrity: sha512-8WVUPy3FtAsKSpyk21kV52HCxB+me6YkbkFHATzC2Yd3yuqHwy2lbFL4alJOLXKljoRw08Zk8/xEj89cLQ/4Nw==} + '@rollup/rollup-linux-arm-musleabihf@4.53.3': + resolution: {integrity: sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.44.1': - resolution: {integrity: sha512-yuktAOaeOgorWDeFJggjuCkMGeITfqvPgkIXhDqsfKX8J3jGyxdDZgBV/2kj/2DyPaLiX6bPdjJDTu9RB8lUPQ==} + '@rollup/rollup-linux-arm64-gnu@4.53.3': + resolution: {integrity: sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.44.1': - resolution: {integrity: sha512-W+GBM4ifET1Plw8pdVaecwUgxmiH23CfAUj32u8knq0JPFyK4weRy6H7ooxYFD19YxBulL0Ktsflg5XS7+7u9g==} + '@rollup/rollup-linux-arm64-musl@4.53.3': + resolution: {integrity: sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-loongarch64-gnu@4.44.1': - resolution: {integrity: sha512-1zqnUEMWp9WrGVuVak6jWTl4fEtrVKfZY7CvcBmUUpxAJ7WcSowPSAWIKa/0o5mBL/Ij50SIf9tuirGx63Ovew==} + '@rollup/rollup-linux-loong64-gnu@4.53.3': + resolution: {integrity: sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==} cpu: [loong64] os: [linux] - '@rollup/rollup-linux-powerpc64le-gnu@4.44.1': - resolution: {integrity: sha512-Rl3JKaRu0LHIx7ExBAAnf0JcOQetQffaw34T8vLlg9b1IhzcBgaIdnvEbbsZq9uZp3uAH+JkHd20Nwn0h9zPjA==} + '@rollup/rollup-linux-ppc64-gnu@4.53.3': + 
resolution: {integrity: sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.44.1': - resolution: {integrity: sha512-j5akelU3snyL6K3N/iX7otLBIl347fGwmd95U5gS/7z6T4ftK288jKq3A5lcFKcx7wwzb5rgNvAg3ZbV4BqUSw==} + '@rollup/rollup-linux-riscv64-gnu@4.53.3': + resolution: {integrity: sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-riscv64-musl@4.44.1': - resolution: {integrity: sha512-ppn5llVGgrZw7yxbIm8TTvtj1EoPgYUAbfw0uDjIOzzoqlZlZrLJ/KuiE7uf5EpTpCTrNt1EdtzF0naMm0wGYg==} + '@rollup/rollup-linux-riscv64-musl@4.53.3': + resolution: {integrity: sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.44.1': - resolution: {integrity: sha512-Hu6hEdix0oxtUma99jSP7xbvjkUM/ycke/AQQ4EC5g7jNRLLIwjcNwaUy95ZKBJJwg1ZowsclNnjYqzN4zwkAw==} + '@rollup/rollup-linux-s390x-gnu@4.53.3': + resolution: {integrity: sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.44.1': - resolution: {integrity: sha512-EtnsrmZGomz9WxK1bR5079zee3+7a+AdFlghyd6VbAjgRJDbTANJ9dcPIPAi76uG05micpEL+gPGmAKYTschQw==} + '@rollup/rollup-linux-x64-gnu@4.53.3': + resolution: {integrity: sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.44.1': - resolution: {integrity: sha512-iAS4p+J1az6Usn0f8xhgL4PaU878KEtutP4hqw52I4IO6AGoyOkHCxcc4bqufv1tQLdDWFx8lR9YlwxKuv3/3g==} + '@rollup/rollup-linux-x64-musl@4.53.3': + resolution: {integrity: sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==} cpu: [x64] os: [linux] - '@rollup/rollup-win32-arm64-msvc@4.44.1': - resolution: {integrity: sha512-NtSJVKcXwcqozOl+FwI41OH3OApDyLk3kqTJgx8+gp6On9ZEt5mYhIsKNPGuaZr3p9T6NWPKGU/03Vw4CNU9qg==} + '@rollup/rollup-openharmony-arm64@4.53.3': + resolution: {integrity: sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.53.3': + resolution: {integrity: sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.44.1': - resolution: {integrity: sha512-JYA3qvCOLXSsnTR3oiyGws1Dm0YTuxAAeaYGVlGpUsHqloPcFjPg+X0Fj2qODGLNwQOAcCiQmHub/V007kiH5A==} + '@rollup/rollup-win32-ia32-msvc@4.53.3': + resolution: {integrity: sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==} cpu: [ia32] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.44.1': - resolution: {integrity: sha512-J8o22LuF0kTe7m+8PvW9wk3/bRq5+mRo5Dqo6+vXb7otCm3TPhYOJqOaQtGU9YMWQSL3krMnoOxMr0+9E6F3Ug==} + '@rollup/rollup-win32-x64-gnu@4.53.3': + resolution: {integrity: sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==} cpu: [x64] os: [win32] - '@trycua/core@0.1.2': - resolution: {integrity: sha512-pSQZaR46OG3MtUCBaneG6RpJD1xfX754VDZ101FM5tkUUiymIrxpQicQEUfhwEBxbI/EmBnmCnVY1AFKvykKzQ==} + '@rollup/rollup-win32-x64-msvc@4.53.3': + resolution: {integrity: sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==} + cpu: [x64] + os: [win32] 
'@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} - '@tybys/wasm-util@0.9.0': - resolution: {integrity: sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==} - - '@types/chai@5.2.2': - resolution: {integrity: sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==} + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} '@types/deep-eql@4.0.2': resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} @@ -768,8 +755,8 @@ packages: '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} - '@types/node@22.15.34': - resolution: {integrity: sha512-8Y6E5WUupYy1Dd0II32BsWAx5MWdcnRd8L84Oys3veg1YrYtNtzgO4CFhiBg6MDSjk7Ay36HYOnU7/tuOzIzcw==} + '@types/node@22.19.1': + resolution: {integrity: sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==} '@types/uuid@10.0.0': resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} @@ -835,8 +822,8 @@ packages: '@vitest/utils@3.2.4': resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} - ansis@4.1.0: - resolution: {integrity: sha512-BGcItUBWSMRgOCe+SVZJ+S7yTRG0eGt9cXAHev72yuGcY23hnLA7Bky5L/xLyPINoSN95geovfBkqoTlNZYa7w==} + ansis@4.2.0: + resolution: {integrity: sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==} engines: {node: '>=14'} args-tokenizer@0.3.0: @@ -846,33 +833,26 @@ packages: resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} engines: {node: '>=12'} - ast-kit@2.1.0: - resolution: {integrity: sha512-ROM2LlXbZBZVk97crfw8PGDOBzzsJvN2uJCmwswvPUNyfH14eg90mSN3xNqsri1JS1G9cz0VzeDUhxJkTrr4Ew==} - engines: {node: '>=20.18.0'} - - ast-kit@2.1.2: - resolution: {integrity: sha512-cl76xfBQM6pztbrFWRnxbrDm9EOqDr1BF6+qQnnDZG2Co2LjyUktkN9GTJfBAfdae+DbT2nJf2nCGAdDDN7W2g==} - engines: {node: '>=20.18.0'} + ast-kit@2.2.0: + resolution: {integrity: sha512-m1Q/RaVOnTp9JxPX+F+Zn7IcLYMzM8kZofDImfsKZd8MbR+ikdOzTeztStWqfrqIxZnYWryyI9ePm3NGjnZgGw==} + engines: {node: '>=20.19.0'} atomic-sleep@1.0.0: resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==} engines: {node: '>=8.0.0'} - birpc@2.4.0: - resolution: {integrity: sha512-5IdNxTyhXHv2UlgnPHQ0h+5ypVmkrYHzL8QT+DwFZ//2N/oNV8Ch+BCRmTJ3x6/z9Axo/cXYBc9eprsUVK/Jsg==} + birpc@2.8.0: + resolution: {integrity: sha512-Bz2a4qD/5GRhiHSwj30c/8kC8QGj12nNDwz3D4ErQ4Xhy35dsSDvF+RA/tWpjyU0pdGtSDiEk6B5fBGE1qNVhw==} - birpc@2.5.0: - resolution: {integrity: sha512-VSWO/W6nNQdyP520F1mhf+Lc2f8pjGQOtoHHm7Ze8Go1kX7akpVIrtTa0fn+HB0QJEDVacl6aO08YE0PgXfdnQ==} - - bumpp@10.2.0: - resolution: {integrity: sha512-1EJ2NG3M3WYJj4m+GtcxNH6Y7zMQ8q68USMoUGKjM6qFTVXSXCnTxcQSUDV7j4KjLVbk2uK6345Z+6RKOv0w5A==} + bumpp@10.3.1: + resolution: {integrity: sha512-cOKPRFCWvHcYPJQAHN6V7Jp/wAfnyqQRXQ+2fgWIL6Gao20rpu7xQ1cGGo1APOfmbQmmHngEPg9Fy7nJ3giRkQ==} engines: {node: '>=18'} hasBin: true - c12@3.0.4: - resolution: {integrity: 
sha512-t5FaZTYbbCtvxuZq9xxIruYydrAGsJ+8UdP0pZzMiK2xl/gNiSOy0OxhLzHUEEb0m1QXYqfzfvyIFEmz/g9lqg==} + c12@3.3.2: + resolution: {integrity: sha512-QkikB2X5voO1okL3QsES0N690Sn/K9WokXqUsDQsWy5SnYb+psYQFGA10iy1bZHj3fjISKsI67Q90gruvWWM3A==} peerDependencies: - magicast: ^0.3.5 + magicast: '*' peerDependenciesMeta: magicast: optional: true @@ -881,9 +861,9 @@ packages: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} - chai@5.2.0: - resolution: {integrity: sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==} - engines: {node: '>=12'} + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} check-error@2.1.1: resolution: {integrity: sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==} @@ -903,8 +883,12 @@ packages: resolution: {integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==} engines: {node: ^14.18.0 || >=16.10.0} - debug@4.4.1: - resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -926,13 +910,13 @@ packages: resolution: {integrity: sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==} engines: {node: '>=0.3.1'} - dotenv@16.6.1: - resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} + dotenv@17.2.3: + resolution: {integrity: sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==} engines: {node: '>=12'} - dts-resolver@2.1.1: - resolution: {integrity: sha512-3BiGFhB6mj5Kv+W2vdJseQUYW+SKVzAFJL6YNP6ursbrwy1fXHRotfHi3xLNxe4wZl/K8qbAFeCDjZLjzqxxRw==} - engines: {node: '>=20.18.0'} + dts-resolver@2.1.3: + resolution: {integrity: sha512-bihc7jPC90VrosXNzK0LTE2cuLP6jr0Ro8jk+kMugHReJVLIpHz/xadeq3MhuwyO4TD4OA3L1Q8pBBFRc08Tsw==} + engines: {node: '>=20.19.0'} peerDependencies: oxc-resolver: '>=11.0.0' peerDependenciesMeta: @@ -955,8 +939,8 @@ packages: engines: {node: '>=12'} hasBin: true - esbuild@0.25.5: - resolution: {integrity: sha512-P8OtKZRv/5J5hhz0cUAdu/cLuPIKXpQl1R9pZtvmHWQvrAUVd0UNIPT4IB4W3rNOqVO0rlqHmCIbSwxh/c9yUQ==} + esbuild@0.25.12: + resolution: {integrity: sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==} engines: {node: '>=18'} hasBin: true @@ -970,19 +954,16 @@ packages: eventemitter3@4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} - expect-type@1.2.1: - resolution: {integrity: sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==} + expect-type@1.2.2: + resolution: {integrity: sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==} engines: {node: '>=12.0.0'} - exsolve@1.0.7: - resolution: {integrity: 
sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==} + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} - fast-redact@3.5.0: - resolution: {integrity: sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==} - engines: {node: '>=6'} - - fdir@6.4.6: - resolution: {integrity: sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w==} + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: @@ -994,8 +975,8 @@ packages: engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] - get-tsconfig@4.10.1: - resolution: {integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==} + get-tsconfig@4.13.0: + resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==} giget@2.0.0: resolution: {integrity: sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA==} @@ -1008,8 +989,11 @@ packages: hookable@5.5.3: resolution: {integrity: sha512-Yc+BQe8SvoXH1643Qez1zqLRmbA5rCL+sSmk6TVos0LWVfNIB7PGncdlId77WzLGSIB5KaWgTaNTs2lNVEI6VQ==} - jiti@2.4.2: - resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} hasBin: true js-tokens@9.0.1: @@ -1023,11 +1007,11 @@ packages: jsonc-parser@3.3.1: resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} - loupe@3.1.4: - resolution: {integrity: sha512-wJzkKwJrheKtknCOKNEtDK4iqg/MxmZheEMtSTYvnzRdEYaZzmgH976nenp8WdJRdx5Vc1X/9MO0Oszl6ezeXg==} + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} - magic-string@0.30.17: - resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==} + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -1037,11 +1021,11 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - node-fetch-native@1.6.6: - resolution: {integrity: sha512-8Mc2HhqPdlIfedsuZoc3yioPuzp6b+L5jRCRY1QzuWZh2EGJVQrGppC6V6cF0bLdbW0+O2YpqCA25aF/1lvipQ==} + node-fetch-native@1.6.7: + resolution: {integrity: sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==} - nypm@0.6.0: - resolution: {integrity: sha512-mn8wBFV9G9+UFHIrq+pZ2r2zL4aPau/by3kJb3cM7+5tQHMt6HGQB8FDIeKFYp8o0D2pnH6nVsO88N4AmUxIWg==} + nypm@0.6.2: + resolution: {integrity: sha512-7eM+hpOtrKrBDCh7Ypu2lJ9Z7PNZBdi/8AT3AX8xoCj43BBVHD0hPSTEvMtkMpfs8FCqBGhxB+uToIQimA111g==} engines: {node: ^14.16.0 || >=16.10.0} hasBin: true @@ -1052,8 +1036,12 @@ packages: resolution: 
{integrity: sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==} engines: {node: '>=14.0.0'} - package-manager-detector@1.3.0: - resolution: {integrity: sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ==} + package-manager-detector@1.5.0: + resolution: {integrity: sha512-uBj69dVlYe/+wxj8JOpr97XfsxH/eumMt6HqjNTmJDf/6NO9s+0uxeOneIz3AsPt2m6y9PqzDzd3ATcU17MNfw==} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} pathe@1.1.2: resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} @@ -1073,14 +1061,14 @@ packages: resolution: {integrity: sha512-viMUCPDL6CSfOu0ZqVcFqbWRXNHIbv2lPqNbrBIjbFYrflebOjItJ4hPfhjnuUCstqciHVu9vVJ7jFqqKi/EuQ==} engines: {node: '>= 14'} - perfect-debounce@1.0.0: - resolution: {integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==} + perfect-debounce@2.0.0: + resolution: {integrity: sha512-fkEH/OBiKrqqI/yIgjR92lMfs2K8105zt/VT6+7eTjNwisrsh47CeIED9z58zI7DfKdH3uHAn25ziRZn3kgAow==} picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} - picomatch@4.0.2: - resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} pino-abstract-transport@2.0.0: @@ -1089,19 +1077,19 @@ packages: pino-std-serializers@7.0.0: resolution: {integrity: sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA==} - pino@9.7.0: - resolution: {integrity: sha512-vnMCM6xZTb1WDmLvtG2lE/2p+t9hDEIvTWJsu6FejkE62vB7gDhvzrpFR4Cw2to+9JNQxVnkAKVPA1KPB98vWg==} + pino@9.14.0: + resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==} hasBin: true - pkg-types@2.1.1: - resolution: {integrity: sha512-eY0QFb6eSwc9+0d/5D2lFFUq+A3n3QNGSy/X2Nvp+6MfzGw2u6EbA7S80actgjY1lkvvI0pqB+a4hioMh443Ew==} + pkg-types@2.3.0: + resolution: {integrity: sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} postcss@8.5.6: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} - posthog-node@5.1.1: - resolution: {integrity: sha512-6VISkNdxO24ehXiDA4dugyCSIV7lpGVaEu5kn/dlAj+SJ1lgcDru9PQ8p/+GSXsXVxohd1t7kHL2JKc9NoGb0w==} + posthog-node@5.13.3: + resolution: {integrity: sha512-4kvyC0kwN2ErSs5SE6HnxOd5mm7GB8csIXVHH1Fz2bnVG+Oi1IQAeDtWiWqrVhgycfZMt+d+DHJoTwlMha8GYw==} engines: {node: '>=20'} prettier@3.6.2: @@ -1112,8 +1100,8 @@ packages: process-warning@5.0.0: resolution: {integrity: sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA==} - quansync@0.2.10: - resolution: {integrity: sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==} + quansync@0.2.11: + resolution: {integrity: sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==} quick-format-unescaped@4.0.4: resolution: {integrity: 
sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==} @@ -1132,14 +1120,14 @@ packages: resolve-pkg-maps@1.0.0: resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} - rolldown-plugin-dts@0.13.13: - resolution: {integrity: sha512-Nchx9nQoa4IpfQ/BJzodKMvtJ3H3dT322siAJSp3uvQJ+Pi1qgEjOp7hSQwGSQRhaC5gC+9hparbWEH5oiAL9Q==} + rolldown-plugin-dts@0.13.14: + resolution: {integrity: sha512-wjNhHZz9dlN6PTIXyizB6u/mAg1wEFMW9yw7imEVe3CxHSRnNHVyycIX0yDEOVJfDNISLPbkCIPEpFpizy5+PQ==} engines: {node: '>=20.18.0'} peerDependencies: '@typescript/native-preview': '>=7.0.0-dev.20250601.1' rolldown: ^1.0.0-beta.9 typescript: ^5.0.0 - vue-tsc: ~2.2.0 + vue-tsc: ^2.2.0 || ^3.0.0 peerDependenciesMeta: '@typescript/native-preview': optional: true @@ -1148,8 +1136,8 @@ packages: vue-tsc: optional: true - rolldown-plugin-dts@0.15.7: - resolution: {integrity: sha512-BpdrnLaa+uyw0rPT47+4FUC7hQFazBFppeFT0ioW5Ybg0XCLeRohc3HHPlnCxI6LtzgSWT7Ot8ahn6ji10IQBg==} + rolldown-plugin-dts@0.15.10: + resolution: {integrity: sha512-8cPVAVQUo9tYAoEpc3jFV9RxSil13hrRRg8cHC9gLXxRMNtWPc1LNMSDXzjyD+5Vny49sDZH77JlXp/vlc4I3g==} engines: {node: '>=20.18.0'} peerDependencies: '@typescript/native-preview': '>=7.0.0-dev.20250601.1' @@ -1164,8 +1152,8 @@ packages: vue-tsc: optional: true - rolldown@1.0.0-beta.44: - resolution: {integrity: sha512-gcqgyCi3g93Fhr49PKvymE8PoaGS0sf6ajQrsYaQ8o5de6aUEbD6rJZiJbhOfpcqOnycgsAsUNPYri1h25NgsQ==} + rolldown@1.0.0-beta.51: + resolution: {integrity: sha512-ZRLgPlS91l4JztLYEZnmMcd3Umcla1hkXJgiEiR4HloRJBBoeaX8qogTu5Jfu36rRMVLndzqYv0h+M5gJAkUfg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -1178,8 +1166,8 @@ packages: '@oxc-project/runtime': optional: true - rollup@4.44.1: - resolution: {integrity: sha512-x8H8aPvD+xbl0Do8oez5f5o8eMS3trfCghc4HhLAnCkj7Vl0d1JWGs0UF/D886zLW2rOj2QymV/JcSSsw+XDNg==} + rollup@4.53.3: + resolution: {integrity: sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true @@ -1190,11 +1178,19 @@ packages: sdp@3.2.1: resolution: {integrity: sha512-lwsAIzOPlH8/7IIjjz3K0zYBk7aBVVcvjMwt3M4fLxpjMYyy7i3I97SLHebgn4YBjirkzfp3RvRDWSKsh/+WFw==} - semver@7.7.2: - resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} engines: {node: '>=10'} hasBin: true + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + siginfo@2.0.0: resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} @@ -1212,11 +1208,11 @@ packages: stackback@0.0.2: resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} - std-env@3.9.0: - resolution: {integrity: sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==} + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} - 
strip-literal@3.0.0: - resolution: {integrity: sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==} + strip-literal@3.1.0: + resolution: {integrity: sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} thread-stream@3.1.0: resolution: {integrity: sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==} @@ -1227,11 +1223,12 @@ packages: tinyexec@0.3.2: resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} - tinyexec@1.0.1: - resolution: {integrity: sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==} + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} - tinyglobby@0.2.14: - resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==} + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} tinypool@1.1.1: @@ -1250,8 +1247,8 @@ packages: resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} engines: {node: '>=14.0.0'} - tinyspy@4.0.3: - resolution: {integrity: sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==} + tinyspy@4.0.4: + resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} engines: {node: '>=14.0.0'} tree-kill@1.2.2: @@ -1277,8 +1274,8 @@ packages: unplugin-unused: optional: true - tsdown@0.14.1: - resolution: {integrity: sha512-/nBuFDKZeYln9hAxwWG5Cm55/823sNIVI693iVi0xRFHzf9OVUq4b/lx9PH1TErFr/IQ0kd2hutFbJIPM0XQWA==} + tsdown@0.14.2: + resolution: {integrity: sha512-6ThtxVZoTlR5YJov5rYvH8N1+/S/rD/pGfehdCLGznGgbxz+73EASV1tsIIZkLw2n+SXcERqHhcB/OkyxdKv3A==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: @@ -1302,18 +1299,21 @@ packages: tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} - tsx@4.20.3: - resolution: {integrity: sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==} + tsx@4.20.6: + resolution: {integrity: sha512-ytQKuwgmrrkDTFP4LjR0ToE2nqgy886GpvRSpU0JAnrdBYppuY5rLkRUYPU1yCryb24SsKBTL/hlDQAEFVwtZg==} engines: {node: '>=18.0.0'} hasBin: true - typescript@5.8.3: - resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} engines: {node: '>=14.17'} hasBin: true - unconfig@7.3.2: - resolution: {integrity: sha512-nqG5NNL2wFVGZ0NA/aCFw0oJ2pxSf1lwg4Z5ill8wd7K4KX/rQbHlwbh+bjctXL5Ly1xtzHenHGOK0b+lG6JVg==} + unconfig-core@7.4.1: + resolution: {integrity: sha512-Bp/bPZjV2Vl/fofoA2OYLSnw1Z0MOhCX7zHnVCYrazpfZvseBbGhwcNQMxsg185Mqh7VZQqK3C8hFG/Dyng+yA==} + + unconfig@7.4.1: + resolution: {integrity: sha512-uyQ7LElcGizrOGZyIq9KU+xkuEjcRf9IpmDTkCSYv5mEeZzrXSj6rb51C0L+WTedsmAoVxW9WKrLWhSwebIM9Q==} undici-types@6.21.0: resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} @@ -1332,8 +1332,8 
@@ packages: engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} hasBin: true - vite@5.4.19: - resolution: {integrity: sha512-qO3aKv3HoQC8QKiNSTuUM1l9o/XX3+c+VTgLHbJWHZGeTPVAg2XwazI9UWzoxjIJCGCV2zU60uqMzjeLZuULqA==} + vite@5.4.21: + resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -1363,8 +1363,8 @@ packages: terser: optional: true - vite@7.0.0: - resolution: {integrity: sha512-ixXJB1YRgDIw2OszKQS9WxGHKwLdCsbQNkpJN171udl6szi/rIySHL6/Os3s2+oE4P/FLD4dxg4mD7Wust+u5g==} + vite@7.2.4: + resolution: {integrity: sha512-NL8jTlbo0Tn4dUEXEsUg8KeyG/Lkmc4Fnzb8JXN/Ykm9G4HNImjtABMJgkQoVjOBN/j2WAwDTRytdqJbZsah7w==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: @@ -1468,6 +1468,11 @@ packages: resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} engines: {node: '>=12'} + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + why-is-node-running@2.3.0: resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} engines: {node: '>=8'} @@ -1485,74 +1490,47 @@ packages: utf-8-validate: optional: true - yaml@2.8.0: - resolution: {integrity: sha512-4lLa/EcQCB0cJkyts+FpIRx5G/llPxfP6VQU5KByHEhLxY3IJCH0f0Hy1MHI8sClTvsIb8qwRJ6R/ZdlDJ/leQ==} + yaml@2.8.1: + resolution: {integrity: sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==} engines: {node: '>= 14.6'} hasBin: true +onlyBuiltDependencies: + - esbuild + - protobufjs + - sharp + - unrs-resolver + snapshots: - '@babel/generator@7.27.5': + '@babel/generator@7.28.5': dependencies: - '@babel/parser': 7.27.7 - '@babel/types': 7.27.7 - '@jridgewell/gen-mapping': 0.3.10 - '@jridgewell/trace-mapping': 0.3.27 - jsesc: 3.1.0 - - '@babel/generator@7.28.3': - dependencies: - '@babel/parser': 7.28.3 - '@babel/types': 7.28.2 + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 '@jridgewell/gen-mapping': 0.3.13 - '@jridgewell/trace-mapping': 0.3.30 + '@jridgewell/trace-mapping': 0.3.31 jsesc: 3.1.0 '@babel/helper-string-parser@7.27.1': {} - '@babel/helper-validator-identifier@7.27.1': {} + '@babel/helper-validator-identifier@7.28.5': {} - '@babel/parser@7.27.7': + '@babel/parser@7.28.5': dependencies: - '@babel/types': 7.27.7 + '@babel/types': 7.28.5 - '@babel/parser@7.28.3': - dependencies: - '@babel/types': 7.28.2 - - '@babel/types@7.27.7': + '@babel/types@7.28.5': dependencies: '@babel/helper-string-parser': 7.27.1 - '@babel/helper-validator-identifier': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 - '@babel/types@7.28.2': - dependencies: - '@babel/helper-string-parser': 7.27.1 - '@babel/helper-validator-identifier': 7.27.1 - - '@emnapi/core@1.4.3': - dependencies: - '@emnapi/wasi-threads': 1.0.2 - tslib: 2.8.1 - optional: true - - '@emnapi/core@1.5.0': + '@emnapi/core@1.7.1': dependencies: '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 optional: true - '@emnapi/runtime@1.4.3': - dependencies: - tslib: 2.8.1 - optional: true - - '@emnapi/runtime@1.5.0': - dependencies: - tslib: 2.8.1 - optional: true - - '@emnapi/wasi-threads@1.0.2': + '@emnapi/runtime@1.7.1': dependencies: tslib: 2.8.1 optional: true @@ -1565,367 +1543,361 @@ snapshots: '@esbuild/aix-ppc64@0.21.5': optional: true - '@esbuild/aix-ppc64@0.25.5': + 
'@esbuild/aix-ppc64@0.25.12': optional: true '@esbuild/android-arm64@0.21.5': optional: true - '@esbuild/android-arm64@0.25.5': + '@esbuild/android-arm64@0.25.12': optional: true '@esbuild/android-arm@0.21.5': optional: true - '@esbuild/android-arm@0.25.5': + '@esbuild/android-arm@0.25.12': optional: true '@esbuild/android-x64@0.21.5': optional: true - '@esbuild/android-x64@0.25.5': + '@esbuild/android-x64@0.25.12': optional: true '@esbuild/darwin-arm64@0.21.5': optional: true - '@esbuild/darwin-arm64@0.25.5': + '@esbuild/darwin-arm64@0.25.12': optional: true '@esbuild/darwin-x64@0.21.5': optional: true - '@esbuild/darwin-x64@0.25.5': + '@esbuild/darwin-x64@0.25.12': optional: true '@esbuild/freebsd-arm64@0.21.5': optional: true - '@esbuild/freebsd-arm64@0.25.5': + '@esbuild/freebsd-arm64@0.25.12': optional: true '@esbuild/freebsd-x64@0.21.5': optional: true - '@esbuild/freebsd-x64@0.25.5': + '@esbuild/freebsd-x64@0.25.12': optional: true '@esbuild/linux-arm64@0.21.5': optional: true - '@esbuild/linux-arm64@0.25.5': + '@esbuild/linux-arm64@0.25.12': optional: true '@esbuild/linux-arm@0.21.5': optional: true - '@esbuild/linux-arm@0.25.5': + '@esbuild/linux-arm@0.25.12': optional: true '@esbuild/linux-ia32@0.21.5': optional: true - '@esbuild/linux-ia32@0.25.5': + '@esbuild/linux-ia32@0.25.12': optional: true '@esbuild/linux-loong64@0.21.5': optional: true - '@esbuild/linux-loong64@0.25.5': + '@esbuild/linux-loong64@0.25.12': optional: true '@esbuild/linux-mips64el@0.21.5': optional: true - '@esbuild/linux-mips64el@0.25.5': + '@esbuild/linux-mips64el@0.25.12': optional: true '@esbuild/linux-ppc64@0.21.5': optional: true - '@esbuild/linux-ppc64@0.25.5': + '@esbuild/linux-ppc64@0.25.12': optional: true '@esbuild/linux-riscv64@0.21.5': optional: true - '@esbuild/linux-riscv64@0.25.5': + '@esbuild/linux-riscv64@0.25.12': optional: true '@esbuild/linux-s390x@0.21.5': optional: true - '@esbuild/linux-s390x@0.25.5': + '@esbuild/linux-s390x@0.25.12': optional: true '@esbuild/linux-x64@0.21.5': optional: true - '@esbuild/linux-x64@0.25.5': + '@esbuild/linux-x64@0.25.12': optional: true - '@esbuild/netbsd-arm64@0.25.5': + '@esbuild/netbsd-arm64@0.25.12': optional: true '@esbuild/netbsd-x64@0.21.5': optional: true - '@esbuild/netbsd-x64@0.25.5': + '@esbuild/netbsd-x64@0.25.12': optional: true - '@esbuild/openbsd-arm64@0.25.5': + '@esbuild/openbsd-arm64@0.25.12': optional: true '@esbuild/openbsd-x64@0.21.5': optional: true - '@esbuild/openbsd-x64@0.25.5': + '@esbuild/openbsd-x64@0.25.12': + optional: true + + '@esbuild/openharmony-arm64@0.25.12': optional: true '@esbuild/sunos-x64@0.21.5': optional: true - '@esbuild/sunos-x64@0.25.5': + '@esbuild/sunos-x64@0.25.12': optional: true '@esbuild/win32-arm64@0.21.5': optional: true - '@esbuild/win32-arm64@0.25.5': + '@esbuild/win32-arm64@0.25.12': optional: true '@esbuild/win32-ia32@0.21.5': optional: true - '@esbuild/win32-ia32@0.25.5': + '@esbuild/win32-ia32@0.25.12': optional: true '@esbuild/win32-x64@0.21.5': optional: true - '@esbuild/win32-x64@0.25.5': + '@esbuild/win32-x64@0.25.12': optional: true - '@jridgewell/gen-mapping@0.3.10': - dependencies: - '@jridgewell/sourcemap-codec': 1.5.2 - '@jridgewell/trace-mapping': 0.3.27 - '@jridgewell/gen-mapping@0.3.13': dependencies: - '@jridgewell/sourcemap-codec': 1.5.2 - '@jridgewell/trace-mapping': 0.3.30 + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 '@jridgewell/resolve-uri@3.1.2': {} - '@jridgewell/sourcemap-codec@1.5.2': {} + '@jridgewell/sourcemap-codec@1.5.5': {} - 
'@jridgewell/trace-mapping@0.3.27': + '@jridgewell/trace-mapping@0.3.31': dependencies: '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.5.2 - - '@jridgewell/trace-mapping@0.3.30': - dependencies: - '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.5.2 + '@jridgewell/sourcemap-codec': 1.5.5 '@msgpack/msgpack@2.8.0': {} - '@napi-rs/wasm-runtime@0.2.11': + '@napi-rs/wasm-runtime@0.2.12': dependencies: - '@emnapi/core': 1.4.3 - '@emnapi/runtime': 1.4.3 - '@tybys/wasm-util': 0.9.0 + '@emnapi/core': 1.7.1 + '@emnapi/runtime': 1.7.1 + '@tybys/wasm-util': 0.10.1 optional: true '@napi-rs/wasm-runtime@1.0.7': dependencies: - '@emnapi/core': 1.5.0 - '@emnapi/runtime': 1.5.0 + '@emnapi/core': 1.7.1 + '@emnapi/runtime': 1.7.1 '@tybys/wasm-util': 0.10.1 optional: true '@oxc-project/types@0.70.0': {} - '@oxc-project/types@0.95.0': {} + '@oxc-project/types@0.98.0': {} - '@quansync/fs@0.1.3': + '@pinojs/redact@0.4.0': {} + + '@posthog/core@1.5.5': dependencies: - quansync: 0.2.10 + cross-spawn: 7.0.6 - '@rolldown/binding-android-arm64@1.0.0-beta.44': + '@quansync/fs@0.1.5': + dependencies: + quansync: 0.2.11 + + '@rolldown/binding-android-arm64@1.0.0-beta.51': optional: true - '@rolldown/binding-darwin-arm64@1.0.0-beta.44': + '@rolldown/binding-darwin-arm64@1.0.0-beta.51': optional: true '@rolldown/binding-darwin-arm64@1.0.0-beta.9': optional: true - '@rolldown/binding-darwin-x64@1.0.0-beta.44': + '@rolldown/binding-darwin-x64@1.0.0-beta.51': optional: true '@rolldown/binding-darwin-x64@1.0.0-beta.9': optional: true - '@rolldown/binding-freebsd-x64@1.0.0-beta.44': + '@rolldown/binding-freebsd-x64@1.0.0-beta.51': optional: true '@rolldown/binding-freebsd-x64@1.0.0-beta.9': optional: true - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.44': + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.51': optional: true '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.9': optional: true - '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.44': + '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.51': optional: true '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.9': optional: true - '@rolldown/binding-linux-arm64-musl@1.0.0-beta.44': + '@rolldown/binding-linux-arm64-musl@1.0.0-beta.51': optional: true '@rolldown/binding-linux-arm64-musl@1.0.0-beta.9': optional: true - '@rolldown/binding-linux-x64-gnu@1.0.0-beta.44': + '@rolldown/binding-linux-x64-gnu@1.0.0-beta.51': optional: true '@rolldown/binding-linux-x64-gnu@1.0.0-beta.9': optional: true - '@rolldown/binding-linux-x64-musl@1.0.0-beta.44': + '@rolldown/binding-linux-x64-musl@1.0.0-beta.51': optional: true '@rolldown/binding-linux-x64-musl@1.0.0-beta.9': optional: true - '@rolldown/binding-openharmony-arm64@1.0.0-beta.44': + '@rolldown/binding-openharmony-arm64@1.0.0-beta.51': optional: true - '@rolldown/binding-wasm32-wasi@1.0.0-beta.44': + '@rolldown/binding-wasm32-wasi@1.0.0-beta.51': dependencies: '@napi-rs/wasm-runtime': 1.0.7 optional: true '@rolldown/binding-wasm32-wasi@1.0.0-beta.9': dependencies: - '@napi-rs/wasm-runtime': 0.2.11 + '@napi-rs/wasm-runtime': 0.2.12 optional: true - '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.44': + '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.51': optional: true '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.9': optional: true - '@rolldown/binding-win32-ia32-msvc@1.0.0-beta.44': + '@rolldown/binding-win32-ia32-msvc@1.0.0-beta.51': optional: true '@rolldown/binding-win32-ia32-msvc@1.0.0-beta.9': optional: true - '@rolldown/binding-win32-x64-msvc@1.0.0-beta.44': + 
'@rolldown/binding-win32-x64-msvc@1.0.0-beta.51': optional: true '@rolldown/binding-win32-x64-msvc@1.0.0-beta.9': optional: true - '@rolldown/pluginutils@1.0.0-beta.44': {} + '@rolldown/pluginutils@1.0.0-beta.51': {} '@rolldown/pluginutils@1.0.0-beta.9': {} - '@rollup/rollup-android-arm-eabi@4.44.1': + '@rollup/rollup-android-arm-eabi@4.53.3': optional: true - '@rollup/rollup-android-arm64@4.44.1': + '@rollup/rollup-android-arm64@4.53.3': optional: true - '@rollup/rollup-darwin-arm64@4.44.1': + '@rollup/rollup-darwin-arm64@4.53.3': optional: true - '@rollup/rollup-darwin-x64@4.44.1': + '@rollup/rollup-darwin-x64@4.53.3': optional: true - '@rollup/rollup-freebsd-arm64@4.44.1': + '@rollup/rollup-freebsd-arm64@4.53.3': optional: true - '@rollup/rollup-freebsd-x64@4.44.1': + '@rollup/rollup-freebsd-x64@4.53.3': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.44.1': + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.44.1': + '@rollup/rollup-linux-arm-musleabihf@4.53.3': optional: true - '@rollup/rollup-linux-arm64-gnu@4.44.1': + '@rollup/rollup-linux-arm64-gnu@4.53.3': optional: true - '@rollup/rollup-linux-arm64-musl@4.44.1': + '@rollup/rollup-linux-arm64-musl@4.53.3': optional: true - '@rollup/rollup-linux-loongarch64-gnu@4.44.1': + '@rollup/rollup-linux-loong64-gnu@4.53.3': optional: true - '@rollup/rollup-linux-powerpc64le-gnu@4.44.1': + '@rollup/rollup-linux-ppc64-gnu@4.53.3': optional: true - '@rollup/rollup-linux-riscv64-gnu@4.44.1': + '@rollup/rollup-linux-riscv64-gnu@4.53.3': optional: true - '@rollup/rollup-linux-riscv64-musl@4.44.1': + '@rollup/rollup-linux-riscv64-musl@4.53.3': optional: true - '@rollup/rollup-linux-s390x-gnu@4.44.1': + '@rollup/rollup-linux-s390x-gnu@4.53.3': optional: true - '@rollup/rollup-linux-x64-gnu@4.44.1': + '@rollup/rollup-linux-x64-gnu@4.53.3': optional: true - '@rollup/rollup-linux-x64-musl@4.44.1': + '@rollup/rollup-linux-x64-musl@4.53.3': optional: true - '@rollup/rollup-win32-arm64-msvc@4.44.1': + '@rollup/rollup-openharmony-arm64@4.53.3': optional: true - '@rollup/rollup-win32-ia32-msvc@4.44.1': + '@rollup/rollup-win32-arm64-msvc@4.53.3': optional: true - '@rollup/rollup-win32-x64-msvc@4.44.1': + '@rollup/rollup-win32-ia32-msvc@4.53.3': optional: true - '@trycua/core@0.1.2': - dependencies: - '@types/uuid': 10.0.0 - pino: 9.7.0 - posthog-node: 5.1.1 - uuid: 11.1.0 + '@rollup/rollup-win32-x64-gnu@4.53.3': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.53.3': + optional: true '@tybys/wasm-util@0.10.1': dependencies: tslib: 2.8.1 optional: true - '@tybys/wasm-util@0.9.0': - dependencies: - tslib: 2.8.1 - optional: true - - '@types/chai@5.2.2': + '@types/chai@5.2.3': dependencies: '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 '@types/deep-eql@4.0.2': {} '@types/estree@1.0.8': {} - '@types/node@22.15.34': + '@types/node@22.19.1': dependencies: undici-types: 6.21.0 @@ -1933,38 +1905,36 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 22.15.34 + '@types/node': 22.19.1 '@vitest/expect@2.1.9': dependencies: '@vitest/spy': 2.1.9 '@vitest/utils': 2.1.9 - chai: 5.2.0 + chai: 5.3.3 tinyrainbow: 1.2.0 '@vitest/expect@3.2.4': dependencies: - '@types/chai': 5.2.2 + '@types/chai': 5.2.3 '@vitest/spy': 3.2.4 '@vitest/utils': 3.2.4 - chai: 5.2.0 + chai: 5.3.3 tinyrainbow: 2.0.0 - '@vitest/mocker@2.1.9(vite@5.4.19(@types/node@22.15.34))': + '@vitest/mocker@2.1.9(vite@5.4.21)': dependencies: '@vitest/spy': 2.1.9 estree-walker: 3.0.3 - magic-string: 0.30.17 - optionalDependencies: - 
vite: 5.4.19(@types/node@22.15.34) + magic-string: 0.30.21 + vite: 5.4.21(@types/node@22.19.1) - '@vitest/mocker@3.2.4(vite@7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0))': + '@vitest/mocker@3.2.4(vite@7.2.4)': dependencies: '@vitest/spy': 3.2.4 estree-walker: 3.0.3 - magic-string: 0.30.17 - optionalDependencies: - vite: 7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) + magic-string: 0.30.21 + vite: 7.2.4(@types/node@22.19.1)(tsx@4.20.6) '@vitest/pretty-format@2.1.9': dependencies: @@ -1983,18 +1953,18 @@ snapshots: dependencies: '@vitest/utils': 3.2.4 pathe: 2.0.3 - strip-literal: 3.0.0 + strip-literal: 3.1.0 '@vitest/snapshot@2.1.9': dependencies: '@vitest/pretty-format': 2.1.9 - magic-string: 0.30.17 + magic-string: 0.30.21 pathe: 1.1.2 '@vitest/snapshot@3.2.4': dependencies: '@vitest/pretty-format': 3.2.4 - magic-string: 0.30.17 + magic-string: 0.30.21 pathe: 2.0.3 '@vitest/spy@2.1.9': @@ -2003,81 +1973,74 @@ snapshots: '@vitest/spy@3.2.4': dependencies: - tinyspy: 4.0.3 + tinyspy: 4.0.4 '@vitest/utils@2.1.9': dependencies: '@vitest/pretty-format': 2.1.9 - loupe: 3.1.4 + loupe: 3.2.1 tinyrainbow: 1.2.0 '@vitest/utils@3.2.4': dependencies: '@vitest/pretty-format': 3.2.4 - loupe: 3.1.4 + loupe: 3.2.1 tinyrainbow: 2.0.0 - ansis@4.1.0: {} + ansis@4.2.0: {} args-tokenizer@0.3.0: {} assertion-error@2.0.1: {} - ast-kit@2.1.0: + ast-kit@2.2.0: dependencies: - '@babel/parser': 7.27.7 - pathe: 2.0.3 - - ast-kit@2.1.2: - dependencies: - '@babel/parser': 7.28.3 + '@babel/parser': 7.28.5 pathe: 2.0.3 atomic-sleep@1.0.0: {} - birpc@2.4.0: {} + birpc@2.8.0: {} - birpc@2.5.0: {} - - bumpp@10.2.0: + bumpp@10.3.1: dependencies: - ansis: 4.1.0 + ansis: 4.2.0 args-tokenizer: 0.3.0 - c12: 3.0.4 + c12: 3.3.2 cac: 6.7.14 escalade: 3.2.0 jsonc-parser: 3.3.1 - package-manager-detector: 1.3.0 - semver: 7.7.2 - tinyexec: 1.0.1 - tinyglobby: 0.2.14 - yaml: 2.8.0 + package-manager-detector: 1.5.0 + semver: 7.7.3 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + yaml: 2.8.1 transitivePeerDependencies: - magicast - c12@3.0.4: + c12@3.3.2: dependencies: chokidar: 4.0.3 confbox: 0.2.2 defu: 6.1.4 - dotenv: 16.6.1 - exsolve: 1.0.7 + dotenv: 17.2.3 + exsolve: 1.0.8 giget: 2.0.0 - jiti: 2.4.2 + jiti: 2.6.1 ohash: 2.0.11 pathe: 2.0.3 - perfect-debounce: 1.0.0 - pkg-types: 2.1.1 + perfect-debounce: 2.0.0 + pkg-types: 2.3.0 rc9: 2.1.2 cac@6.7.14: {} - chai@5.2.0: + chai@5.3.3: dependencies: assertion-error: 2.0.1 check-error: 2.1.1 deep-eql: 5.0.2 - loupe: 3.1.4 + loupe: 3.2.1 pathval: 2.0.1 check-error@2.1.1: {} @@ -2094,7 +2057,13 @@ snapshots: consola@3.4.2: {} - debug@4.4.1: + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.3: dependencies: ms: 2.1.3 @@ -2106,9 +2075,9 @@ snapshots: diff@8.0.2: {} - dotenv@16.6.1: {} + dotenv@17.2.3: {} - dts-resolver@2.1.1: {} + dts-resolver@2.1.3: {} empathic@1.1.0: {} @@ -2142,33 +2111,34 @@ snapshots: '@esbuild/win32-ia32': 0.21.5 '@esbuild/win32-x64': 0.21.5 - esbuild@0.25.5: + esbuild@0.25.12: optionalDependencies: - '@esbuild/aix-ppc64': 0.25.5 - '@esbuild/android-arm': 0.25.5 - '@esbuild/android-arm64': 0.25.5 - '@esbuild/android-x64': 0.25.5 - '@esbuild/darwin-arm64': 0.25.5 - '@esbuild/darwin-x64': 0.25.5 - '@esbuild/freebsd-arm64': 0.25.5 - '@esbuild/freebsd-x64': 0.25.5 - '@esbuild/linux-arm': 0.25.5 - '@esbuild/linux-arm64': 0.25.5 - '@esbuild/linux-ia32': 0.25.5 - '@esbuild/linux-loong64': 0.25.5 - '@esbuild/linux-mips64el': 0.25.5 - '@esbuild/linux-ppc64': 0.25.5 - 
'@esbuild/linux-riscv64': 0.25.5 - '@esbuild/linux-s390x': 0.25.5 - '@esbuild/linux-x64': 0.25.5 - '@esbuild/netbsd-arm64': 0.25.5 - '@esbuild/netbsd-x64': 0.25.5 - '@esbuild/openbsd-arm64': 0.25.5 - '@esbuild/openbsd-x64': 0.25.5 - '@esbuild/sunos-x64': 0.25.5 - '@esbuild/win32-arm64': 0.25.5 - '@esbuild/win32-ia32': 0.25.5 - '@esbuild/win32-x64': 0.25.5 + '@esbuild/aix-ppc64': 0.25.12 + '@esbuild/android-arm': 0.25.12 + '@esbuild/android-arm64': 0.25.12 + '@esbuild/android-x64': 0.25.12 + '@esbuild/darwin-arm64': 0.25.12 + '@esbuild/darwin-x64': 0.25.12 + '@esbuild/freebsd-arm64': 0.25.12 + '@esbuild/freebsd-x64': 0.25.12 + '@esbuild/linux-arm': 0.25.12 + '@esbuild/linux-arm64': 0.25.12 + '@esbuild/linux-ia32': 0.25.12 + '@esbuild/linux-loong64': 0.25.12 + '@esbuild/linux-mips64el': 0.25.12 + '@esbuild/linux-ppc64': 0.25.12 + '@esbuild/linux-riscv64': 0.25.12 + '@esbuild/linux-s390x': 0.25.12 + '@esbuild/linux-x64': 0.25.12 + '@esbuild/netbsd-arm64': 0.25.12 + '@esbuild/netbsd-x64': 0.25.12 + '@esbuild/openbsd-arm64': 0.25.12 + '@esbuild/openbsd-x64': 0.25.12 + '@esbuild/openharmony-arm64': 0.25.12 + '@esbuild/sunos-x64': 0.25.12 + '@esbuild/win32-arm64': 0.25.12 + '@esbuild/win32-ia32': 0.25.12 + '@esbuild/win32-x64': 0.25.12 escalade@3.2.0: {} @@ -2178,20 +2148,18 @@ snapshots: eventemitter3@4.0.7: {} - expect-type@1.2.1: {} + expect-type@1.2.2: {} - exsolve@1.0.7: {} + exsolve@1.0.8: {} - fast-redact@3.5.0: {} - - fdir@6.4.6(picomatch@4.0.2): - optionalDependencies: - picomatch: 4.0.2 + fdir@6.5.0(picomatch@4.0.3): + dependencies: + picomatch: 4.0.3 fsevents@2.3.3: optional: true - get-tsconfig@4.10.1: + get-tsconfig@4.13.0: dependencies: resolve-pkg-maps: 1.0.0 @@ -2200,8 +2168,8 @@ snapshots: citty: 0.1.6 consola: 3.4.2 defu: 6.1.4 - node-fetch-native: 1.6.6 - nypm: 0.6.0 + node-fetch-native: 1.6.7 + nypm: 0.6.2 pathe: 2.0.3 happy-dom@17.6.3: @@ -2211,7 +2179,9 @@ snapshots: hookable@5.5.3: {} - jiti@2.4.2: {} + isexe@2.0.0: {} + + jiti@2.6.1: {} js-tokens@9.0.1: {} @@ -2219,31 +2189,33 @@ snapshots: jsonc-parser@3.3.1: {} - loupe@3.1.4: {} + loupe@3.2.1: {} - magic-string@0.30.17: + magic-string@0.30.21: dependencies: - '@jridgewell/sourcemap-codec': 1.5.2 + '@jridgewell/sourcemap-codec': 1.5.5 ms@2.1.3: {} nanoid@3.3.11: {} - node-fetch-native@1.6.6: {} + node-fetch-native@1.6.7: {} - nypm@0.6.0: + nypm@0.6.2: dependencies: citty: 0.1.6 consola: 3.4.2 pathe: 2.0.3 - pkg-types: 2.1.1 - tinyexec: 0.3.2 + pkg-types: 2.3.0 + tinyexec: 1.0.2 ohash@2.0.11: {} on-exit-leak-free@2.1.2: {} - package-manager-detector@1.3.0: {} + package-manager-detector@1.5.0: {} + + path-key@3.1.1: {} pathe@1.1.2: {} @@ -2260,11 +2232,11 @@ snapshots: peerjs-js-binarypack: 2.1.0 webrtc-adapter: 9.0.3 - perfect-debounce@1.0.0: {} + perfect-debounce@2.0.0: {} picocolors@1.1.1: {} - picomatch@4.0.2: {} + picomatch@4.0.3: {} pino-abstract-transport@2.0.0: dependencies: @@ -2272,10 +2244,10 @@ snapshots: pino-std-serializers@7.0.0: {} - pino@9.7.0: + pino@9.14.0: dependencies: + '@pinojs/redact': 0.4.0 atomic-sleep: 1.0.0 - fast-redact: 3.5.0 on-exit-leak-free: 2.1.2 pino-abstract-transport: 2.0.0 pino-std-serializers: 7.0.0 @@ -2286,10 +2258,10 @@ snapshots: sonic-boom: 4.2.0 thread-stream: 3.1.0 - pkg-types@2.1.1: + pkg-types@2.3.0: dependencies: confbox: 0.2.2 - exsolve: 1.0.7 + exsolve: 1.0.8 pathe: 2.0.3 postcss@8.5.6: @@ -2298,13 +2270,15 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 - posthog-node@5.1.1: {} + posthog-node@5.13.3: + dependencies: + '@posthog/core': 1.5.5 prettier@3.6.2: {} 
process-warning@5.0.0: {} - quansync@0.2.10: {} + quansync@0.2.11: {} quick-format-unescaped@4.0.4: {} @@ -2319,65 +2293,63 @@ snapshots: resolve-pkg-maps@1.0.0: {} - rolldown-plugin-dts@0.13.13(rolldown@1.0.0-beta.9)(typescript@5.8.3): + rolldown-plugin-dts@0.13.14(rolldown@1.0.0-beta.9)(typescript@5.9.3): dependencies: - '@babel/generator': 7.27.5 - '@babel/parser': 7.27.7 - '@babel/types': 7.27.7 - ast-kit: 2.1.0 - birpc: 2.4.0 - debug: 4.4.1 - dts-resolver: 2.1.1 - get-tsconfig: 4.10.1 + '@babel/generator': 7.28.5 + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + ast-kit: 2.2.0 + birpc: 2.8.0 + debug: 4.4.3 + dts-resolver: 2.1.3 + get-tsconfig: 4.13.0 rolldown: 1.0.0-beta.9 - optionalDependencies: - typescript: 5.8.3 + typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver - supports-color - rolldown-plugin-dts@0.15.7(rolldown@1.0.0-beta.44)(typescript@5.8.3): + rolldown-plugin-dts@0.15.10(rolldown@1.0.0-beta.51)(typescript@5.9.3): dependencies: - '@babel/generator': 7.28.3 - '@babel/parser': 7.28.3 - '@babel/types': 7.28.2 - ast-kit: 2.1.2 - birpc: 2.5.0 - debug: 4.4.1 - dts-resolver: 2.1.1 - get-tsconfig: 4.10.1 - rolldown: 1.0.0-beta.44 - optionalDependencies: - typescript: 5.8.3 + '@babel/generator': 7.28.5 + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + ast-kit: 2.2.0 + birpc: 2.8.0 + debug: 4.4.3 + dts-resolver: 2.1.3 + get-tsconfig: 4.13.0 + rolldown: 1.0.0-beta.51 + typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver - supports-color - rolldown@1.0.0-beta.44: + rolldown@1.0.0-beta.51: dependencies: - '@oxc-project/types': 0.95.0 - '@rolldown/pluginutils': 1.0.0-beta.44 + '@oxc-project/types': 0.98.0 + '@rolldown/pluginutils': 1.0.0-beta.51 optionalDependencies: - '@rolldown/binding-android-arm64': 1.0.0-beta.44 - '@rolldown/binding-darwin-arm64': 1.0.0-beta.44 - '@rolldown/binding-darwin-x64': 1.0.0-beta.44 - '@rolldown/binding-freebsd-x64': 1.0.0-beta.44 - '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-beta.44 - '@rolldown/binding-linux-arm64-gnu': 1.0.0-beta.44 - '@rolldown/binding-linux-arm64-musl': 1.0.0-beta.44 - '@rolldown/binding-linux-x64-gnu': 1.0.0-beta.44 - '@rolldown/binding-linux-x64-musl': 1.0.0-beta.44 - '@rolldown/binding-openharmony-arm64': 1.0.0-beta.44 - '@rolldown/binding-wasm32-wasi': 1.0.0-beta.44 - '@rolldown/binding-win32-arm64-msvc': 1.0.0-beta.44 - '@rolldown/binding-win32-ia32-msvc': 1.0.0-beta.44 - '@rolldown/binding-win32-x64-msvc': 1.0.0-beta.44 + '@rolldown/binding-android-arm64': 1.0.0-beta.51 + '@rolldown/binding-darwin-arm64': 1.0.0-beta.51 + '@rolldown/binding-darwin-x64': 1.0.0-beta.51 + '@rolldown/binding-freebsd-x64': 1.0.0-beta.51 + '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-beta.51 + '@rolldown/binding-linux-arm64-gnu': 1.0.0-beta.51 + '@rolldown/binding-linux-arm64-musl': 1.0.0-beta.51 + '@rolldown/binding-linux-x64-gnu': 1.0.0-beta.51 + '@rolldown/binding-linux-x64-musl': 1.0.0-beta.51 + '@rolldown/binding-openharmony-arm64': 1.0.0-beta.51 + '@rolldown/binding-wasm32-wasi': 1.0.0-beta.51 + '@rolldown/binding-win32-arm64-msvc': 1.0.0-beta.51 + '@rolldown/binding-win32-ia32-msvc': 1.0.0-beta.51 + '@rolldown/binding-win32-x64-msvc': 1.0.0-beta.51 rolldown@1.0.0-beta.9: dependencies: '@oxc-project/types': 0.70.0 '@rolldown/pluginutils': 1.0.0-beta.9 - ansis: 4.1.0 + ansis: 4.2.0 optionalDependencies: '@rolldown/binding-darwin-arm64': 1.0.0-beta.9 '@rolldown/binding-darwin-x64': 1.0.0-beta.9 @@ -2392,37 +2364,45 @@ snapshots: '@rolldown/binding-win32-ia32-msvc': 1.0.0-beta.9 '@rolldown/binding-win32-x64-msvc': 
1.0.0-beta.9 - rollup@4.44.1: + rollup@4.53.3: dependencies: '@types/estree': 1.0.8 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.44.1 - '@rollup/rollup-android-arm64': 4.44.1 - '@rollup/rollup-darwin-arm64': 4.44.1 - '@rollup/rollup-darwin-x64': 4.44.1 - '@rollup/rollup-freebsd-arm64': 4.44.1 - '@rollup/rollup-freebsd-x64': 4.44.1 - '@rollup/rollup-linux-arm-gnueabihf': 4.44.1 - '@rollup/rollup-linux-arm-musleabihf': 4.44.1 - '@rollup/rollup-linux-arm64-gnu': 4.44.1 - '@rollup/rollup-linux-arm64-musl': 4.44.1 - '@rollup/rollup-linux-loongarch64-gnu': 4.44.1 - '@rollup/rollup-linux-powerpc64le-gnu': 4.44.1 - '@rollup/rollup-linux-riscv64-gnu': 4.44.1 - '@rollup/rollup-linux-riscv64-musl': 4.44.1 - '@rollup/rollup-linux-s390x-gnu': 4.44.1 - '@rollup/rollup-linux-x64-gnu': 4.44.1 - '@rollup/rollup-linux-x64-musl': 4.44.1 - '@rollup/rollup-win32-arm64-msvc': 4.44.1 - '@rollup/rollup-win32-ia32-msvc': 4.44.1 - '@rollup/rollup-win32-x64-msvc': 4.44.1 + '@rollup/rollup-android-arm-eabi': 4.53.3 + '@rollup/rollup-android-arm64': 4.53.3 + '@rollup/rollup-darwin-arm64': 4.53.3 + '@rollup/rollup-darwin-x64': 4.53.3 + '@rollup/rollup-freebsd-arm64': 4.53.3 + '@rollup/rollup-freebsd-x64': 4.53.3 + '@rollup/rollup-linux-arm-gnueabihf': 4.53.3 + '@rollup/rollup-linux-arm-musleabihf': 4.53.3 + '@rollup/rollup-linux-arm64-gnu': 4.53.3 + '@rollup/rollup-linux-arm64-musl': 4.53.3 + '@rollup/rollup-linux-loong64-gnu': 4.53.3 + '@rollup/rollup-linux-ppc64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-musl': 4.53.3 + '@rollup/rollup-linux-s390x-gnu': 4.53.3 + '@rollup/rollup-linux-x64-gnu': 4.53.3 + '@rollup/rollup-linux-x64-musl': 4.53.3 + '@rollup/rollup-openharmony-arm64': 4.53.3 + '@rollup/rollup-win32-arm64-msvc': 4.53.3 + '@rollup/rollup-win32-ia32-msvc': 4.53.3 + '@rollup/rollup-win32-x64-gnu': 4.53.3 + '@rollup/rollup-win32-x64-msvc': 4.53.3 fsevents: 2.3.3 safe-stable-stringify@2.5.0: {} sdp@3.2.1: {} - semver@7.7.2: {} + semver@7.7.3: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} siginfo@2.0.0: {} @@ -2436,9 +2416,9 @@ snapshots: stackback@0.0.2: {} - std-env@3.9.0: {} + std-env@3.10.0: {} - strip-literal@3.0.0: + strip-literal@3.1.0: dependencies: js-tokens: 9.0.1 @@ -2450,12 +2430,12 @@ snapshots: tinyexec@0.3.2: {} - tinyexec@1.0.1: {} + tinyexec@1.0.2: {} - tinyglobby@0.2.14: + tinyglobby@0.2.15: dependencies: - fdir: 6.4.6(picomatch@4.0.2) - picomatch: 4.0.2 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 tinypool@1.1.1: {} @@ -2465,27 +2445,26 @@ snapshots: tinyspy@3.0.2: {} - tinyspy@4.0.3: {} + tinyspy@4.0.4: {} tree-kill@1.2.2: {} - tsdown@0.11.13(typescript@5.8.3): + tsdown@0.11.13(typescript@5.9.3): dependencies: - ansis: 4.1.0 + ansis: 4.2.0 cac: 6.7.14 chokidar: 4.0.3 - debug: 4.4.1 + debug: 4.4.3 diff: 8.0.2 empathic: 1.1.0 hookable: 5.5.3 rolldown: 1.0.0-beta.9 - rolldown-plugin-dts: 0.13.13(rolldown@1.0.0-beta.9)(typescript@5.8.3) - semver: 7.7.2 - tinyexec: 1.0.1 - tinyglobby: 0.2.14 - unconfig: 7.3.2 - optionalDependencies: - typescript: 5.8.3 + rolldown-plugin-dts: 0.13.14(rolldown@1.0.0-beta.9)(typescript@5.9.3) + semver: 7.7.3 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + typescript: 5.9.3 + unconfig: 7.4.1 transitivePeerDependencies: - '@oxc-project/runtime' - '@typescript/native-preview' @@ -2493,24 +2472,23 @@ snapshots: - supports-color - vue-tsc - tsdown@0.14.1(typescript@5.8.3): + tsdown@0.14.2(typescript@5.9.3): dependencies: - ansis: 4.1.0 + ansis: 4.2.0 cac: 6.7.14 
chokidar: 4.0.3 - debug: 4.4.1 + debug: 4.4.3 diff: 8.0.2 empathic: 2.0.0 hookable: 5.5.3 - rolldown: 1.0.0-beta.44 - rolldown-plugin-dts: 0.15.7(rolldown@1.0.0-beta.44)(typescript@5.8.3) - semver: 7.7.2 - tinyexec: 1.0.1 - tinyglobby: 0.2.14 + rolldown: 1.0.0-beta.51 + rolldown-plugin-dts: 0.15.10(rolldown@1.0.0-beta.51)(typescript@5.9.3) + semver: 7.7.3 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 tree-kill: 1.2.2 - unconfig: 7.3.2 - optionalDependencies: - typescript: 5.8.3 + typescript: 5.9.3 + unconfig: 7.4.1 transitivePeerDependencies: - '@typescript/native-preview' - oxc-resolver @@ -2520,33 +2498,39 @@ snapshots: tslib@2.8.1: optional: true - tsx@4.20.3: + tsx@4.20.6: dependencies: - esbuild: 0.25.5 - get-tsconfig: 4.10.1 + esbuild: 0.25.12 + get-tsconfig: 4.13.0 optionalDependencies: fsevents: 2.3.3 - typescript@5.8.3: {} + typescript@5.9.3: {} - unconfig@7.3.2: + unconfig-core@7.4.1: dependencies: - '@quansync/fs': 0.1.3 + '@quansync/fs': 0.1.5 + quansync: 0.2.11 + + unconfig@7.4.1: + dependencies: + '@quansync/fs': 0.1.5 defu: 6.1.4 - jiti: 2.4.2 - quansync: 0.2.10 + jiti: 2.6.1 + quansync: 0.2.11 + unconfig-core: 7.4.1 undici-types@6.21.0: {} uuid@11.1.0: {} - vite-node@2.1.9(@types/node@22.15.34): + vite-node@2.1.9(@types/node@22.19.1): dependencies: cac: 6.7.14 - debug: 4.4.1 + debug: 4.4.3 es-module-lexer: 1.7.0 pathe: 1.1.2 - vite: 5.4.19(@types/node@22.15.34) + vite: 5.4.21(@types/node@22.19.1) transitivePeerDependencies: - '@types/node' - less @@ -2558,13 +2542,13 @@ snapshots: - supports-color - terser - vite-node@3.2.4(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0): + vite-node@3.2.4(@types/node@22.19.1)(tsx@4.20.6): dependencies: cac: 6.7.14 - debug: 4.4.1 + debug: 4.4.3 es-module-lexer: 1.7.0 pathe: 2.0.3 - vite: 7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) + vite: 7.2.4(@types/node@22.19.1)(tsx@4.20.6) transitivePeerDependencies: - '@types/node' - jiti @@ -2579,55 +2563,52 @@ snapshots: - tsx - yaml - vite@5.4.19(@types/node@22.15.34): + vite@5.4.21(@types/node@22.19.1): dependencies: + '@types/node': 22.19.1 esbuild: 0.21.5 postcss: 8.5.6 - rollup: 4.44.1 + rollup: 4.53.3 optionalDependencies: - '@types/node': 22.15.34 fsevents: 2.3.3 - vite@7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0): + vite@7.2.4(@types/node@22.19.1)(tsx@4.20.6): dependencies: - esbuild: 0.25.5 - fdir: 6.4.6(picomatch@4.0.2) - picomatch: 4.0.2 + '@types/node': 22.19.1 + esbuild: 0.25.12 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 postcss: 8.5.6 - rollup: 4.44.1 - tinyglobby: 0.2.14 + rollup: 4.53.3 + tinyglobby: 0.2.15 + tsx: 4.20.6 optionalDependencies: - '@types/node': 22.15.34 fsevents: 2.3.3 - jiti: 2.4.2 - tsx: 4.20.3 - yaml: 2.8.0 - vitest@2.1.9(@types/node@22.15.34)(happy-dom@17.6.3): + vitest@2.1.9(@types/node@22.19.1)(happy-dom@17.6.3): dependencies: + '@types/node': 22.19.1 '@vitest/expect': 2.1.9 - '@vitest/mocker': 2.1.9(vite@5.4.19(@types/node@22.15.34)) + '@vitest/mocker': 2.1.9(vite@5.4.21) '@vitest/pretty-format': 2.1.9 '@vitest/runner': 2.1.9 '@vitest/snapshot': 2.1.9 '@vitest/spy': 2.1.9 '@vitest/utils': 2.1.9 - chai: 5.2.0 - debug: 4.4.1 - expect-type: 1.2.1 - magic-string: 0.30.17 + chai: 5.3.3 + debug: 4.4.3 + expect-type: 1.2.2 + happy-dom: 17.6.3 + magic-string: 0.30.21 pathe: 1.1.2 - std-env: 3.9.0 + std-env: 3.10.0 tinybench: 2.9.0 tinyexec: 0.3.2 tinypool: 1.1.1 tinyrainbow: 1.2.0 - vite: 5.4.19(@types/node@22.15.34) - vite-node: 2.1.9(@types/node@22.15.34) + vite: 5.4.21(@types/node@22.19.1) + vite-node: 
2.1.9(@types/node@22.19.1) why-is-node-running: 2.3.0 - optionalDependencies: - '@types/node': 22.15.34 - happy-dom: 17.6.3 transitivePeerDependencies: - less - lightningcss @@ -2639,34 +2620,33 @@ snapshots: - supports-color - terser - vitest@3.2.4(@types/node@22.15.34)(happy-dom@17.6.3)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0): + vitest@3.2.4(@types/node@22.19.1)(happy-dom@17.6.3)(tsx@4.20.6): dependencies: - '@types/chai': 5.2.2 + '@types/chai': 5.2.3 + '@types/node': 22.19.1 '@vitest/expect': 3.2.4 - '@vitest/mocker': 3.2.4(vite@7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0)) + '@vitest/mocker': 3.2.4(vite@7.2.4) '@vitest/pretty-format': 3.2.4 '@vitest/runner': 3.2.4 '@vitest/snapshot': 3.2.4 '@vitest/spy': 3.2.4 '@vitest/utils': 3.2.4 - chai: 5.2.0 - debug: 4.4.1 - expect-type: 1.2.1 - magic-string: 0.30.17 + chai: 5.3.3 + debug: 4.4.3 + expect-type: 1.2.2 + happy-dom: 17.6.3 + magic-string: 0.30.21 pathe: 2.0.3 - picomatch: 4.0.2 - std-env: 3.9.0 + picomatch: 4.0.3 + std-env: 3.10.0 tinybench: 2.9.0 tinyexec: 0.3.2 - tinyglobby: 0.2.14 + tinyglobby: 0.2.15 tinypool: 1.1.1 tinyrainbow: 2.0.0 - vite: 7.0.0(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) - vite-node: 3.2.4(@types/node@22.15.34)(jiti@2.4.2)(tsx@4.20.3)(yaml@2.8.0) + vite: 7.2.4(@types/node@22.19.1)(tsx@4.20.6) + vite-node: 3.2.4(@types/node@22.19.1)(tsx@4.20.6) why-is-node-running: 2.3.0 - optionalDependencies: - '@types/node': 22.15.34 - happy-dom: 17.6.3 transitivePeerDependencies: - jiti - less @@ -2689,6 +2669,10 @@ snapshots: whatwg-mimetype@3.0.0: {} + which@2.0.2: + dependencies: + isexe: 2.0.0 + why-is-node-running@2.3.0: dependencies: siginfo: 2.0.0 @@ -2696,4 +2680,4 @@ snapshots: ws@8.18.3: {} - yaml@2.8.0: {} + yaml@2.8.1: {} diff --git a/notebooks/agent_nb.ipynb b/notebooks/agent_nb.ipynb index 69959037..857a950c 100644 --- a/notebooks/agent_nb.ipynb +++ b/notebooks/agent_nb.ipynb @@ -59,10 +59,8 @@ "import os\n", "\n", "# Get API keys from environment or prompt user\n", - "anthropic_key = os.getenv(\"ANTHROPIC_API_KEY\") or \\\n", - " input(\"Enter your Anthropic API key: \")\n", - "openai_key = os.getenv(\"OPENAI_API_KEY\") or \\\n", - " input(\"Enter your OpenAI API key: \")\n", + "anthropic_key = os.getenv(\"ANTHROPIC_API_KEY\") or input(\"Enter your Anthropic API key: \")\n", + "openai_key = os.getenv(\"OPENAI_API_KEY\") or input(\"Enter your OpenAI API key: \")\n", "\n", "os.environ[\"ANTHROPIC_API_KEY\"] = anthropic_key\n", "os.environ[\"OPENAI_API_KEY\"] = openai_key" @@ -95,10 +93,8 @@ "metadata": {}, "outputs": [], "source": [ - "cua_api_key = os.getenv(\"CUA_API_KEY\") or \\\n", - " input(\"Enter your Cua API Key: \")\n", - "container_name = os.getenv(\"CONTAINER_NAME\") or \\\n", - " input(\"Enter your Cloud Container name: \")" + "cua_api_key = os.getenv(\"CUA_API_KEY\") or input(\"Enter your Cua API Key: \")\n", + "container_name = os.getenv(\"CONTAINER_NAME\") or input(\"Enter your Cloud Container name: \")" ] }, { @@ -114,7 +110,9 @@ "metadata": {}, "outputs": [], "source": [ - "os_type = input(\"Enter the OS type of your sandbox (linux/macos) [default: linux]: \").lower() or \"linux\"" + "os_type = (\n", + " input(\"Enter the OS type of your sandbox (linux/macos) [default: linux]: \").lower() or \"linux\"\n", + ")" ] }, { @@ -139,7 +137,7 @@ " api_key=cua_api_key,\n", " name=container_name,\n", " provider_type=VMProviderType.CLOUD,\n", - " verbosity=logging.INFO\n", + " verbosity=logging.INFO,\n", ")\n", "\n", "# Create agent\n", @@ -148,8 +146,8 @@ " 
tools=[computer],\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", - " verbosity=logging.INFO\n", - ")\n" + " verbosity=logging.INFO,\n", + ")" ] }, { @@ -168,7 +166,7 @@ "tasks = [\n", " \"Open a web browser and navigate to GitHub\",\n", " \"Search for the trycua/cua repository\",\n", - " \"Take a screenshot of the repository page\"\n", + " \"Take a screenshot of the repository page\",\n", "]\n", "\n", "for i, task in enumerate(tasks):\n", @@ -176,7 +174,7 @@ " async for result in agent.run(task):\n", " # print(result)\n", " pass\n", - " print(f\"βœ… Task {i+1}/{len(tasks)} completed: {task}\")\n" + " print(f\"βœ… Task {i+1}/{len(tasks)} completed: {task}\")" ] }, { @@ -206,7 +204,7 @@ " os_type=\"linux\",\n", " provider_type=\"docker\",\n", " image=\"trycua/cua-ubuntu:latest\",\n", - " name=\"my-cua-container\"\n", + " name=\"my-cua-container\",\n", ")" ] }, @@ -237,12 +235,12 @@ "\n", "\n", "computer = Computer(\n", - " verbosity=logging.INFO, \n", + " verbosity=logging.INFO,\n", " provider_type=VMProviderType.LUME,\n", " display=\"1024x768\",\n", " memory=\"8GB\",\n", " cpu=\"4\",\n", - " os_type=\"macos\"\n", + " os_type=\"macos\",\n", ")" ] }, @@ -272,7 +270,7 @@ " tools=[computer],\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", - " verbosity=logging.INFO\n", + " verbosity=logging.INFO,\n", ")" ] }, @@ -328,14 +326,14 @@ "\n", "# Create agent with Anthropic loop and provider\n", "agent = ComputerAgent(\n", - " model=\"omniparser+ollama_chat/gemma3:12b-it-q4_K_M\",\n", - " # model=\"omniparser+openai/gpt-4o-mini\",\n", - " # model=\"omniparser+anthropic/claude-3-7-sonnet-20250219\",\n", - " tools=[computer],\n", - " trajectory_dir=str(Path(\"trajectories\")),\n", - " only_n_most_recent_images=3,\n", - " verbosity=logging.INFO\n", - " )\n", + " model=\"omniparser+ollama_chat/gemma3:12b-it-q4_K_M\",\n", + " # model=\"omniparser+openai/gpt-4o-mini\",\n", + " # model=\"omniparser+anthropic/claude-3-7-sonnet-20250219\",\n", + " tools=[computer],\n", + " trajectory_dir=str(Path(\"trajectories\")),\n", + " only_n_most_recent_images=3,\n", + " verbosity=logging.INFO,\n", + ")\n", "\n", "tasks = [\n", " \"Look for a repository named trycua/cua on GitHub.\",\n", @@ -414,7 +412,7 @@ " tools=[computer], # Can be cloud or local\n", " model=\"openai/computer-use-preview\",\n", " trajectory_dir=str(Path(\"trajectories\")),\n", - " verbosity=logging.INFO\n", + " verbosity=logging.INFO,\n", ")" ] }, @@ -433,10 +431,10 @@ "source": [ "anthropic_agent = ComputerAgent(\n", " tools=[computer],\n", - " model=\"anthropic/claude-3-5-sonnet-20241022\",\n", + " model=\"anthropic/claude-sonnet-4-5-20250929\",\n", " trajectory_dir=str(Path(\"trajectories\")),\n", - " verbosity=logging.INFO\n", - ")\n" + " verbosity=logging.INFO,\n", + ")" ] }, { @@ -458,8 +456,8 @@ " # model=\"omniparser+openai/gpt-4o-mini\",\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", - " verbosity=logging.INFO\n", - ")\n" + " verbosity=logging.INFO,\n", + ")" ] }, { @@ -477,12 +475,12 @@ "source": [ "uitars_agent = ComputerAgent(\n", " tools=[computer],\n", - " model=\"mlx/mlx-community/UI-TARS-1.5-7B-6bit\", # local MLX\n", + " model=\"mlx/mlx-community/UI-TARS-1.5-7B-6bit\", # local MLX\n", " # model=\"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B\", # local Huggingface (transformers)\n", " # model=\"huggingface/ByteDance-Seed/UI-TARS-1.5-7B\", # remote Huggingface (TGI)\n", " 
trajectory_dir=str(Path(\"trajectories\")),\n", - " verbosity=logging.INFO\n", - ")\n" + " verbosity=logging.INFO,\n", + ")" ] }, { @@ -502,7 +500,10 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": "print(f\"Trajectories saved to: {Path('trajectories').absolute()}\")\nprint(\"Upload trajectory files to https://cua.ai/trajectory-viewer to visualize agent actions\")\n" + "source": [ + "print(f\"Trajectories saved to: {Path('trajectories').absolute()}\")\n", + "print(\"Upload trajectory files to https://cua.ai/trajectory-viewer to visualize agent actions\")" + ] } ], "metadata": { diff --git a/notebooks/blog/build-your-own-operator-on-macos-1.ipynb b/notebooks/blog/build-your-own-operator-on-macos-1.ipynb index cdca7b5f..03f05f2b 100644 --- a/notebooks/blog/build-your-own-operator-on-macos-1.ipynb +++ b/notebooks/blog/build-your-own-operator-on-macos-1.ipynb @@ -96,7 +96,7 @@ "source": [ "async def execute_action(computer, action):\n", " action_type = action.type\n", - " \n", + "\n", " if action_type == \"click\":\n", " x = action.x\n", " y = action.y\n", @@ -107,12 +107,12 @@ " await computer.interface.right_click()\n", " else:\n", " await computer.interface.left_click()\n", - " \n", + "\n", " elif action_type == \"type\":\n", " text = action.text\n", " print(f\"Typing text: {text}\")\n", " await computer.interface.type_text(text)\n", - " \n", + "\n", " elif action_type == \"scroll\":\n", " x = action.x\n", " y = action.y\n", @@ -121,7 +121,7 @@ " print(f\"Scrolling at ({x}, {y}) with offsets (scroll_x={scroll_x}, scroll_y={scroll_y})\")\n", " await computer.interface.move_cursor(x, y)\n", " await computer.interface.scroll(scroll_y) # Assuming CUA provides a scroll method\n", - " \n", + "\n", " elif action_type == \"keypress\":\n", " keys = action.keys\n", " for key in keys:\n", @@ -133,17 +133,17 @@ " await computer.interface.press_key(\"space\")\n", " else:\n", " await computer.interface.press_key(key)\n", - " \n", + "\n", " elif action_type == \"wait\":\n", - " print(f\"Waiting for 2 seconds\")\n", + " print(\"Waiting for 2 seconds\")\n", " await asyncio.sleep(2)\n", - " \n", + "\n", " elif action_type == \"screenshot\":\n", " print(\"Taking screenshot\")\n", " # This is handled automatically in the main loop, but we can take an extra one if requested\n", " screenshot = await computer.interface.screenshot()\n", " return screenshot\n", - " \n", + "\n", " else:\n", " print(f\"Unrecognized action: {action_type}\")" ] @@ -173,43 +173,45 @@ "source": [ "async def cua_openai_loop():\n", " # Initialize the CUA computer instance (macOS sandbox)\n", - " async with Computer(\n", - " display=\"1024x768\",\n", - " memory=\"4GB\",\n", - " cpu=\"2\",\n", - " os_type=\"macos\"\n", - " ) as computer:\n", + " async with Computer(display=\"1024x768\", memory=\"4GB\", cpu=\"2\", os_type=\"macos\") as computer:\n", " await computer.run()\n", - " \n", + "\n", " # Capture the initial screenshot\n", " screenshot = await computer.interface.screenshot()\n", - " screenshot_base64 = base64.b64encode(screenshot).decode('utf-8')\n", + " screenshot_base64 = base64.b64encode(screenshot).decode(\"utf-8\")\n", "\n", " # Initial request to start the loop\n", " response = openai.responses.create(\n", " model=\"computer-use-preview\",\n", - " tools=[{\n", - " \"type\": \"computer_use_preview\",\n", - " \"display_width\": 1024,\n", - " \"display_height\": 768,\n", - " \"environment\": \"mac\"\n", - " }],\n", - " input=[\n", - " { # type: ignore\n", - " \"role\": \"user\", \n", - " \"content\": [\n", - 
" {\"type\": \"input_text\", \"text\": \"Open Safari, download and install Cursor.\"},\n", - " {\"type\": \"input_image\", \"image_url\": f\"data:image/png;base64,{screenshot_base64}\"}\n", - " ]\n", + " tools=[\n", + " {\n", + " \"type\": \"computer_use_preview\",\n", + " \"display_width\": 1024,\n", + " \"display_height\": 768,\n", + " \"environment\": \"mac\",\n", " }\n", " ],\n", - " truncation=\"auto\"\n", + " input=[\n", + " { # type: ignore\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\"type\": \"input_text\", \"text\": \"Open Safari, download and install Cursor.\"},\n", + " {\n", + " \"type\": \"input_image\",\n", + " \"image_url\": f\"data:image/png;base64,{screenshot_base64}\",\n", + " },\n", + " ],\n", + " }\n", + " ],\n", + " truncation=\"auto\",\n", " )\n", "\n", " # Continue the loop until no more computer_call actions\n", " while True:\n", " # Check for computer_call actions\n", - " computer_calls = [item for item in response.output if item and item.type == \"computer_call\"]\n", + " computer_calls = [\n", + " item for item in response.output if item and item.type == \"computer_call\"\n", + " ]\n", " if not computer_calls:\n", " print(\"No more computer calls. Loop complete.\")\n", " break\n", @@ -234,33 +236,38 @@ "\n", " # Capture new screenshot after action\n", " new_screenshot = await computer.interface.screenshot()\n", - " new_screenshot_base64 = base64.b64encode(new_screenshot).decode('utf-8')\n", + " new_screenshot_base64 = base64.b64encode(new_screenshot).decode(\"utf-8\")\n", "\n", " # Send the screenshot back as computer_call_output\n", " response = openai.responses.create(\n", " model=\"computer-use-preview\",\n", " previous_response_id=response.id, # Link to previous response\n", - " tools=[{\n", - " \"type\": \"computer_use_preview\",\n", - " \"display_width\": 1024,\n", - " \"display_height\": 768,\n", - " \"environment\": \"mac\"\n", - " }],\n", - " input=[{ # type: ignore\n", - " \"type\": \"computer_call_output\",\n", - " \"call_id\": last_call_id,\n", - " \"acknowledged_safety_checks\": acknowledged_checks,\n", - " \"output\": {\n", - " \"type\": \"input_image\",\n", - " \"image_url\": f\"data:image/png;base64,{new_screenshot_base64}\"\n", + " tools=[\n", + " {\n", + " \"type\": \"computer_use_preview\",\n", + " \"display_width\": 1024,\n", + " \"display_height\": 768,\n", + " \"environment\": \"mac\",\n", " }\n", - " }],\n", - " truncation=\"auto\"\n", + " ],\n", + " input=[\n", + " { # type: ignore\n", + " \"type\": \"computer_call_output\",\n", + " \"call_id\": last_call_id,\n", + " \"acknowledged_safety_checks\": acknowledged_checks,\n", + " \"output\": {\n", + " \"type\": \"input_image\",\n", + " \"image_url\": f\"data:image/png;base64,{new_screenshot_base64}\",\n", + " },\n", + " }\n", + " ],\n", + " truncation=\"auto\",\n", " )\n", "\n", " # End the session\n", " await computer.stop()\n", "\n", + "\n", "# Run the loop\n", "await cua_openai_loop()" ] diff --git a/notebooks/blog/build-your-own-operator-on-macos-2.ipynb b/notebooks/blog/build-your-own-operator-on-macos-2.ipynb index c38ea04e..d3c48058 100644 --- a/notebooks/blog/build-your-own-operator-on-macos-2.ipynb +++ b/notebooks/blog/build-your-own-operator-on-macos-2.ipynb @@ -128,12 +128,12 @@ "outputs": [], "source": [ "agent = ComputerAgent(\n", - " tools=[computer],\n", - " model=\"openai/computer-use-preview\",\n", - " save_trajectory=True,\n", - " only_n_most_recent_images=3,\n", - " verbosity=logging.INFO\n", - " )\n", + " tools=[computer],\n", + " 
model=\"openai/computer-use-preview\",\n", + " save_trajectory=True,\n", + " only_n_most_recent_images=3,\n", + " verbosity=logging.INFO,\n", + ")\n", "\n", "\n", "for i, task in enumerate(tasks):\n", diff --git a/notebooks/composite_agents_docker_nb.ipynb b/notebooks/composite_agents_docker_nb.ipynb index ea12e166..65bc2104 100644 --- a/notebooks/composite_agents_docker_nb.ipynb +++ b/notebooks/composite_agents_docker_nb.ipynb @@ -76,13 +76,18 @@ "source": [ "import os\n", "\n", - "OPENROUTER_API_KEY = os.getenv('OPENROUTER_API_KEY') or input('Enter your OPENROUTER_API_KEY: ').strip()\n", - "os.environ['OPENROUTER_API_KEY'] = OPENROUTER_API_KEY\n", + "OPENROUTER_API_KEY = (\n", + " os.getenv(\"OPENROUTER_API_KEY\") or input(\"Enter your OPENROUTER_API_KEY: \").strip()\n", + ")\n", + "os.environ[\"OPENROUTER_API_KEY\"] = OPENROUTER_API_KEY\n", "\n", "# Optional: if planning model uses OpenAI provider\n", - "OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') or input('(Optional) Enter your OPENAI_API_KEY (press Enter to skip): ').strip()\n", + "OPENAI_API_KEY = (\n", + " os.getenv(\"OPENAI_API_KEY\")\n", + " or input(\"(Optional) Enter your OPENAI_API_KEY (press Enter to skip): \").strip()\n", + ")\n", "if OPENAI_API_KEY:\n", - " os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY" + " os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY" ] }, { @@ -104,24 +109,26 @@ "from computer import Computer\n", "from agent import ComputerAgent\n", "\n", + "\n", "async def main():\n", " # Launch & connect to a Docker container running the Computer Server\n", " async with Computer(\n", - " os_type='linux',\n", - " provider_type='docker',\n", - " image='trycua/cua-ubuntu:latest',\n", - " name='my-cua-container'\n", + " os_type=\"linux\",\n", + " provider_type=\"docker\",\n", + " image=\"trycua/cua-ubuntu:latest\",\n", + " name=\"my-cua-container\",\n", " ) as computer:\n", " agent = ComputerAgent(\n", - " model='openrouter/z-ai/glm-4.5v+openai/gpt-5-nano',\n", + " model=\"openrouter/z-ai/glm-4.5v+openai/gpt-5-nano\",\n", " tools=[computer],\n", - " trajectory_dir='trajectories' # Save agent trajectory (screenshots, api calls)\n", + " trajectory_dir=\"trajectories\", # Save agent trajectory (screenshots, api calls)\n", " )\n", "\n", " # Simple task to verify end-to-end\n", - " async for _ in agent.run('Open a browser and go to example.com'):\n", + " async for _ in agent.run(\"Open a browser and go to example.com\"):\n", " pass\n", "\n", + "\n", "asyncio.run(main())" ] }, diff --git a/notebooks/computer_nb.ipynb b/notebooks/computer_nb.ipynb index db76d0f5..96791faf 100644 --- a/notebooks/computer_nb.ipynb +++ b/notebooks/computer_nb.ipynb @@ -49,10 +49,8 @@ "# Get API key and container name from environment or prompt user\n", "import os\n", "\n", - "cua_api_key = os.getenv(\"CUA_API_KEY\") or \\\n", - " input(\"Enter your Cua API Key: \")\n", - "container_name = os.getenv(\"CONTAINER_NAME\") or \\\n", - " input(\"Enter your Cloud Container name: \")" + "cua_api_key = os.getenv(\"CUA_API_KEY\") or input(\"Enter your Cua API Key: \")\n", + "container_name = os.getenv(\"CONTAINER_NAME\") or input(\"Enter your Cloud Container name: \")" ] }, { @@ -68,7 +66,9 @@ "metadata": {}, "outputs": [], "source": [ - "os_type = input(\"Enter the OS type of your sandbox (linux/macos) [default: linux]: \").lower() or \"linux\"" + "os_type = (\n", + " input(\"Enter the OS type of your sandbox (linux/macos) [default: linux]: \").lower() or \"linux\"\n", + ")" ] }, { @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "await 
computer.run() # Initialize the computer first\n", + "await computer.run() # Initialize the computer first\n", "\n", "screenshot = await computer.interface.screenshot()\n", "\n", @@ -159,10 +159,10 @@ " os_type=\"linux\",\n", " provider_type=\"docker\",\n", " image=\"trycua/cua-ubuntu:latest\",\n", - " name=\"my-cua-container\"\n", + " name=\"my-cua-container\",\n", ")\n", "\n", - "await computer.run() # Launch & connect to Docker container" + "await computer.run() # Launch & connect to Docker container" ] }, { @@ -330,12 +330,7 @@ "metadata": {}, "outputs": [], "source": [ - "computer = Computer(\n", - " display=\"1024x768\",\n", - " memory=\"8GB\",\n", - " cpu=\"4\",\n", - " os_type=\"macos\"\n", - ")\n", + "computer = Computer(display=\"1024x768\", memory=\"8GB\", cpu=\"4\", os_type=\"macos\")\n", "\n", "await computer.run()" ] @@ -513,7 +508,7 @@ " memory=\"4GB\",\n", " cpu=\"2\",\n", " os_type=\"macos\",\n", - " shared_directories=[\"/absolute/path/to/directory\"]\n", + " shared_directories=[\"/absolute/path/to/directory\"],\n", ")" ] }, @@ -533,11 +528,7 @@ "outputs": [], "source": [ "computer = Computer(\n", - " display=\"1024x768\",\n", - " memory=\"4GB\",\n", - " cpu=\"2\",\n", - " os_type=\"macos\",\n", - " use_host_computer_server=True\n", + " display=\"1024x768\", memory=\"4GB\", cpu=\"2\", os_type=\"macos\", use_host_computer_server=True\n", ")" ] }, @@ -563,12 +554,7 @@ "metadata": {}, "outputs": [], "source": [ - "async with Computer(\n", - " display=\"1024x768\",\n", - " memory=\"4GB\",\n", - " cpu=\"2\",\n", - " os_type=\"macos\"\n", - ") as computer:\n", + "async with Computer(display=\"1024x768\", memory=\"4GB\", cpu=\"2\", os_type=\"macos\") as computer:\n", " await computer.run()\n", " res = await computer.interface.run_command(\"ls -a\")\n", "\n", diff --git a/notebooks/customizing_computeragent.ipynb b/notebooks/customizing_computeragent.ipynb index 56f0beb9..1664172b 100644 --- a/notebooks/customizing_computeragent.ipynb +++ b/notebooks/customizing_computeragent.ipynb @@ -40,10 +40,10 @@ " os_type=\"linux\",\n", " provider_type=\"docker\",\n", " image=\"trycua/cua-ubuntu:latest\",\n", - " name=\"my-cua-container\"\n", + " name=\"my-cua-container\",\n", ")\n", "\n", - "await computer.run() # Launch & connect to Docker container" + "await computer.run() # Launch & connect to Docker container" ] }, { @@ -81,12 +81,12 @@ " instructions=instructions,\n", " callbacks=[LoggingCallback(level=logging.INFO)],\n", ")\n", - "messages = [\n", - " {\"role\": \"user\", \"content\": \"Open the settings and turn on dark mode.\"}\n", - "]\n", + "messages = [{\"role\": \"user\", \"content\": \"Open the settings and turn on dark mode.\"}]\n", "\n", "# In notebooks, you may want to consume the async generator\n", "import asyncio\n", + "\n", + "\n", "async def run_once():\n", " async for chunk in agent.run(messages):\n", " # Print any assistant text outputs\n", @@ -96,7 +96,8 @@ " if c.get(\"text\"):\n", " print(c.get(\"text\"))\n", "\n", - "await run_once()\n" + "\n", + "await run_once()" ] }, { @@ -127,11 +128,12 @@ " return \"0.00%\"\n", " return f\"{(numerator/denominator)*100:.2f}%\"\n", "\n", + "\n", "agent_with_tool = ComputerAgent(\n", " model=\"openai/computer-use-preview\",\n", " tools=[computer, calculate_percentage],\n", " instructions=\"When doing math, prefer the `calculate_percentage` tool when relevant.\",\n", - ")\n" + ")" ] }, { @@ -152,13 +154,13 @@ "from agent.callbacks import ImageRetentionCallback, TrajectorySaverCallback\n", "\n", "agent_with_callbacks = 
ComputerAgent(\n", - " model=\"anthropic/claude-3-5-sonnet-20241022\",\n", + " model=\"anthropic/claude-sonnet-4-5-20250929\",\n", " tools=[computer],\n", " callbacks=[\n", " ImageRetentionCallback(only_n_most_recent_images=3),\n", " TrajectorySaverCallback(\"./trajectories\"),\n", " ],\n", - ")\n" + ")" ] }, { @@ -198,4 +200,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/notebooks/eval_osworld.ipynb b/notebooks/eval_osworld.ipynb index 234c90ff..99e3243f 100644 --- a/notebooks/eval_osworld.ipynb +++ b/notebooks/eval_osworld.ipynb @@ -17,7 +17,7 @@ "outputs": [], "source": [ "# # Install dependencies if needed\n", - "# !uv venv \n", + "# !uv venv\n", "# !source .venv/bin/activate\n", "# !uv sync" ] @@ -42,14 +42,14 @@ "import os\n", "\n", "# Load environment variables from ../.env\n", - "load_dotenv(dotenv_path='../.env')\n", + "load_dotenv(dotenv_path=\"../.env\")\n", "\n", "# Required environment variables:\n", "# - HUD_API_KEY (for HUD access)\n", "# - ANTHROPIC_API_KEY (for Claude models)\n", "# - OPENAI_API_KEY (for OpenAI models)\n", - "assert os.getenv('HUD_API_KEY') is not None\n", - "assert os.getenv('ANTHROPIC_API_KEY') is not None or os.getenv('OPENAI_API_KEY') is not None\n", + "assert os.getenv(\"HUD_API_KEY\") is not None\n", + "assert os.getenv(\"ANTHROPIC_API_KEY\") is not None or os.getenv(\"OPENAI_API_KEY\") is not None\n", "\n", "from pprint import pprint" ] @@ -76,7 +76,7 @@ "await run_single_task(\n", " dataset=\"hud-evals/OSWorld-Verified\",\n", " model=\"openai/computer-use-preview+openai/gpt-5\", # or any supported model string\n", - " task_id=155 # open last tab task (easy)\n", + " task_id=155, # open last tab task (easy)\n", ")" ] }, @@ -100,12 +100,12 @@ "job_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n", "\n", "results = await run_full_dataset(\n", - " dataset=\"hud-evals/OSWorld-Verified\", # You can also pass a Dataset or a list[dict]\n", - " job_name=job_name, # Optional; defaults to a timestamp for custom datasets\n", - " model=\"openai/computer-use-preview\", # Or any supported model string\n", - " max_concurrent=20, # Tune to your infra\n", - " max_steps=50, # Safety cap per task\n", - " split=\"train[:3]\" # Limit to just 3 tasks\n", + " dataset=\"hud-evals/OSWorld-Verified\", # You can also pass a Dataset or a list[dict]\n", + " job_name=job_name, # Optional; defaults to a timestamp for custom datasets\n", + " model=\"openai/computer-use-preview\", # Or any supported model string\n", + " max_concurrent=20, # Tune to your infra\n", + " max_steps=50, # Safety cap per task\n", + " split=\"train[:3]\", # Limit to just 3 tasks\n", ")\n", "\n", "# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", @@ -133,7 +133,7 @@ "models_to_test = [\n", " \"openai/computer-use-preview+anthropic/claude-opus-4-20250514\",\n", "]\n", - " \n", + "\n", "\n", "for model in models_to_test:\n", " # Full dataset evaluation (runs via HUD's run_dataset under the hood)\n", @@ -142,12 +142,12 @@ "\n", " results = await run_full_dataset(\n", " dataset=\"hud-evals/OSWorld-Verified\",\n", - " job_name=job_name, \n", + " job_name=job_name,\n", " model=model,\n", - " max_concurrent=20, \n", + " max_concurrent=20,\n", " max_steps=75,\n", " trajectory_dir=f\"trajectories/osworld_{job_uuid}\",\n", - " only_n_most_recent_images=3\n", + " only_n_most_recent_images=3,\n", " )\n", "\n", " # results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", diff --git a/notebooks/ollama_nb.ipynb 
b/notebooks/ollama_nb.ipynb index d0296145..9b5cb188 100644 --- a/notebooks/ollama_nb.ipynb +++ b/notebooks/ollama_nb.ipynb @@ -73,11 +73,12 @@ "\"\"\"\n", "\n", "from pathlib import Path\n", - "if not Path('.env').exists():\n", - " Path('.env').write_text(ENV_TEMPLATE)\n", - " print('A .env file was created! Fill in the empty values you need.')\n", + "\n", + "if not Path(\".env\").exists():\n", + " Path(\".env\").write_text(ENV_TEMPLATE)\n", + " print(\"A .env file was created! Fill in the empty values you need.\")\n", "else:\n", - " print('.env already exists')\n" + " print(\".env already exists\")" ] }, { @@ -90,10 +91,11 @@ "# Load .env into environment\n", "import os\n", "from dotenv import load_dotenv\n", - "load_dotenv(dotenv_path='.env', override=True)\n", - "print('OPENAI_API_KEY set:', bool(os.getenv('OPENAI_API_KEY')))\n", - "print('ANTHROPIC_API_KEY set:', bool(os.getenv('ANTHROPIC_API_KEY')))\n", - "print('OLLAMA_API_BASE:', os.getenv('OLLAMA_API_BASE', 'http://localhost:11434'))\n" + "\n", + "load_dotenv(dotenv_path=\".env\", override=True)\n", + "print(\"OPENAI_API_KEY set:\", bool(os.getenv(\"OPENAI_API_KEY\")))\n", + "print(\"ANTHROPIC_API_KEY set:\", bool(os.getenv(\"ANTHROPIC_API_KEY\")))\n", + "print(\"OLLAMA_API_BASE:\", os.getenv(\"OLLAMA_API_BASE\", \"http://localhost:11434\"))" ] }, { @@ -143,15 +145,11 @@ "from computer import Computer, VMProviderType\n", "import webbrowser\n", "\n", - "computer = Computer(\n", - " os_type=\"linux\",\n", - " provider_type=VMProviderType.DOCKER,\n", - " verbosity=logging.INFO\n", - ")\n", + "computer = Computer(os_type=\"linux\", provider_type=VMProviderType.DOCKER, verbosity=logging.INFO)\n", "await computer.run()\n", "\n", "# Optional: open the VNC page in your browser\n", - "webbrowser.open('http://localhost:8006/', new=0, autoraise=True)\n" + "webbrowser.open(\"http://localhost:8006/\", new=0, autoraise=True)" ] }, { @@ -181,16 +179,16 @@ "agent_all_in_one = ComputerAgent(\n", " model=\"ollama/blaifa/InternVL3_5:8b\",\n", " tools=[computer],\n", - " trajectory_dir=str(Path('trajectories')),\n", + " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", " verbosity=logging.INFO,\n", " # instructions=\"You are a helpful assistant.\" # Editable instructions for prompt engineering\n", ")\n", "\n", - "print('Running all-in-one Ollama CUA model...')\n", + "print(\"Running all-in-one Ollama CUA model...\")\n", "async for _ in agent_all_in_one.run(\"Open the web browser and go to example.com\"):\n", " pass\n", - "print('βœ… Done')\n" + "print(\"βœ… Done\")" ] }, { @@ -205,7 +203,7 @@ "\n", "Examples:\n", "- `openai/computer-use-preview+ollama/gemma3:4b`\n", - "- `anthropic/claude-3-5-sonnet-20241022+ollama/gemma3:4b`\n" + "- `anthropic/claude-sonnet-4-5-20250929+ollama/gemma3:4b`\n" ] }, { @@ -219,24 +217,37 @@ "import logging\n", "\n", "agent_composed = ComputerAgent(\n", - " model=\"anthropic/claude-3-5-sonnet-20241022+ollama/gemma3:4b\",\n", + " model=\"anthropic/claude-sonnet-4-5-20250929+ollama/gemma3:4b\",\n", " tools=[computer],\n", - " trajectory_dir='trajectories',\n", + " trajectory_dir=\"trajectories\",\n", " only_n_most_recent_images=3,\n", " verbosity=logging.INFO,\n", ")\n", "\n", - "print('Running composed agent (OpenAI grounding + Ollama VLM)...')\n", + "print(\"Running composed agent (OpenAI grounding + Ollama VLM)...\")\n", "async for _ in agent_composed.run(\"Open a text editor and type: Hello from composed model!\"):\n", " pass\n", - "print('βœ… Done')\n" + "print(\"βœ… Done\")" ] }, { 
"cell_type": "markdown", "id": "section-3-conceptual", "metadata": {}, - "source": "## 3) Customize your agent πŸ› οΈ\n\nFor a few customization options, see: https://cua.ai/docs/agent-sdk/customizing-computeragent\n\nLevels of customization you can explore:\n\n1) Simple β€” Prompt engineering\n2) Easy β€” Tools\n3) Intermediate β€” Callbacks\n4) Expert β€” Custom agent via `register_agent` (see `libs/python/agent/agent/decorators.py` β†’ `register_agent`)\n\nor, incorporate the ComputerAgent into your own agent framework!" + "source": [ + "## 3) Customize your agent πŸ› οΈ\n", + "\n", + "For a few customization options, see: https://cua.ai/docs/agent-sdk/customizing-computeragent\n", + "\n", + "Levels of customization you can explore:\n", + "\n", + "1) Simple β€” Prompt engineering\n", + "2) Easy β€” Tools\n", + "3) Intermediate β€” Callbacks\n", + "4) Expert β€” Custom agent via `register_agent` (see `libs/python/agent/agent/decorators.py` β†’ `register_agent`)\n", + "\n", + "or, incorporate the ComputerAgent into your own agent framework!" + ] }, { "cell_type": "markdown", @@ -276,4 +287,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/notebooks/sota_hackathon.ipynb b/notebooks/sota_hackathon.ipynb index e840f57f..9429f1f8 100644 --- a/notebooks/sota_hackathon.ipynb +++ b/notebooks/sota_hackathon.ipynb @@ -79,6 +79,7 @@ "\"\"\"\n", "\n", "import os\n", + "\n", "if not os.path.exists(\".env\"):\n", " open(\".env\", \"w\").write(ENV_TEMPLATE)\n", " print(\"A .env file was created! Fill in the empty values.\")" @@ -103,7 +104,8 @@ "# HUD requires the .env file to be in the same directory\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv(dotenv_path='.env', override=True)\n", + "\n", + "load_dotenv(dotenv_path=\".env\", override=True)\n", "\n", "assert os.getenv(\"HUD_API_KEY\")" ] @@ -124,7 +126,22 @@ "id": "cd4393b0", "metadata": {}, "outputs": [], - "source": "import logging\nfrom pathlib import Path\nfrom agent import ComputerAgent\n\n# Here you can set the model and tools for your agent.\n# Computer use models: https://cua.ai/docs/agent-sdk/supported-agents/computer-use-agents\n# Composed agent models: https://cua.ai/docs/agent-sdk/supported-agents/composed-agents\n# Custom tools: https://cua.ai/docs/agent-sdk/custom-tools\nagent_config = {\n \"model\": \"openai/computer-use-preview\",\n \"trajectory_dir\": str(Path(\"trajectories\")),\n \"only_n_most_recent_images\": 3,\n \"verbosity\": logging.INFO\n}" + "source": [ + "import logging\n", + "from pathlib import Path\n", + "from agent import ComputerAgent\n", + "\n", + "# Here you can set the model and tools for your agent.\n", + "# Computer use models: https://cua.ai/docs/agent-sdk/supported-agents/computer-use-agents\n", + "# Composed agent models: https://cua.ai/docs/agent-sdk/supported-agents/composed-agents\n", + "# Custom tools: https://cua.ai/docs/agent-sdk/custom-tools\n", + "agent_config = {\n", + " \"model\": \"openai/computer-use-preview\",\n", + " \"trajectory_dir\": str(Path(\"trajectories\")),\n", + " \"only_n_most_recent_images\": 3,\n", + " \"verbosity\": logging.INFO,\n", + "}" + ] }, { "cell_type": "markdown", @@ -157,14 +174,10 @@ "import webbrowser\n", "\n", "# Connect to your existing cloud container\n", - "computer = Computer(\n", - " os_type=\"linux\",\n", - " provider_type=VMProviderType.DOCKER,\n", - " verbosity=logging.INFO\n", - ")\n", + "computer = Computer(os_type=\"linux\", provider_type=VMProviderType.DOCKER, verbosity=logging.INFO)\n", "await 
computer.run()\n", "\n", - "agent_config[\"tools\"] = [ computer ]\n", + "agent_config[\"tools\"] = [computer]\n", "\n", "webbrowser.open(\"http://localhost:8006/\", new=0, autoraise=True)" ] @@ -189,9 +202,7 @@ "# Create agent\n", "agent = ComputerAgent(**agent_config)\n", "\n", - "tasks = [\n", - " \"Open the web browser and search for a repository named trycua/cua on GitHub.\"\n", - "]\n", + "tasks = [\"Open the web browser and search for a repository named trycua/cua on GitHub.\"]\n", "\n", "for i, task in enumerate(tasks):\n", " print(f\"\\nExecuting task {i}/{len(tasks)}: {task}\")\n", @@ -218,7 +229,29 @@ "id": "6bf0887e", "metadata": {}, "outputs": [], - "source": "import uuid\nfrom pprint import pprint\nfrom agent.integrations.hud import run_full_dataset\n\njob_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n\n# Full dataset evaluation (runs via HUD's run_dataset under the hood)\n# See the documentation here: https://cua.ai/docs/agent-sdk/integrations/hud#running-a-full-dataset\nresults = await run_full_dataset(\n dataset=\"ddupont/OSWorld-Tiny-Public\",\n job_name=job_name,\n **agent_config,\n max_concurrent=20,\n max_steps=50,\n #split=\"train[:5]\"\n)\n\n# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\nprint(f\"Job: {job_name}\")\nprint(f\"Total results: {len(results)}\")\npprint(results[:3])" + "source": [ + "import uuid\n", + "from pprint import pprint\n", + "from agent.integrations.hud import run_full_dataset\n", + "\n", + "job_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n", + "\n", + "# Full dataset evaluation (runs via HUD's run_dataset under the hood)\n", + "# See the documentation here: https://cua.ai/docs/agent-sdk/integrations/hud#running-a-full-dataset\n", + "results = await run_full_dataset(\n", + " dataset=\"ddupont/OSWorld-Tiny-Public\",\n", + " job_name=job_name,\n", + " **agent_config,\n", + " max_concurrent=20,\n", + " max_steps=50,\n", + " # split=\"train[:5]\"\n", + ")\n", + "\n", + "# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", + "print(f\"Job: {job_name}\")\n", + "print(f\"Total results: {len(results)}\")\n", + "pprint(results[:3])" + ] }, { "cell_type": "markdown", diff --git a/notebooks/sota_hackathon_cloud.ipynb b/notebooks/sota_hackathon_cloud.ipynb index bcd50362..3059a7b2 100644 --- a/notebooks/sota_hackathon_cloud.ipynb +++ b/notebooks/sota_hackathon_cloud.ipynb @@ -78,6 +78,7 @@ "\"\"\"\n", "\n", "import os\n", + "\n", "if not os.path.exists(\".env\"):\n", " open(\".env\", \"w\").write(ENV_TEMPLATE)\n", " print(\"A .env file was created! 
Fill in the empty values.\")" @@ -102,7 +103,8 @@ "# HUD requires the .env file to be in the same directory\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv(dotenv_path='.env', override=True)\n", + "\n", + "load_dotenv(dotenv_path=\".env\", override=True)\n", "\n", "assert os.getenv(\"CUA_API_KEY\")\n", "assert os.getenv(\"CUA_CONTAINER_NAME\")\n", @@ -125,7 +127,22 @@ "id": "cd4393b0", "metadata": {}, "outputs": [], - "source": "import logging\nfrom pathlib import Path\nfrom agent import ComputerAgent\n\n# Here you can set the model and tools for your agent.\n# Computer use models: https://cua.ai/docs/agent-sdk/supported-agents/computer-use-agents\n# Composed agent models: https://cua.ai/docs/agent-sdk/supported-agents/composed-agents\n# Custom tools: https://cua.ai/docs/agent-sdk/custom-tools\nagent_config = {\n \"model\": \"openai/computer-use-preview\",\n \"trajectory_dir\": str(Path(\"trajectories\")),\n \"only_n_most_recent_images\": 3,\n \"verbosity\": logging.INFO\n}" + "source": [ + "import logging\n", + "from pathlib import Path\n", + "from agent import ComputerAgent\n", + "\n", + "# Here you can set the model and tools for your agent.\n", + "# Computer use models: https://cua.ai/docs/agent-sdk/supported-agents/computer-use-agents\n", + "# Composed agent models: https://cua.ai/docs/agent-sdk/supported-agents/composed-agents\n", + "# Custom tools: https://cua.ai/docs/agent-sdk/custom-tools\n", + "agent_config = {\n", + " \"model\": \"openai/computer-use-preview\",\n", + " \"trajectory_dir\": str(Path(\"trajectories\")),\n", + " \"only_n_most_recent_images\": 3,\n", + " \"verbosity\": logging.INFO,\n", + "}" + ] }, { "cell_type": "markdown", @@ -158,10 +175,10 @@ " provider_type=VMProviderType.CLOUD,\n", " name=os.getenv(\"CUA_CONTAINER_NAME\") or \"\",\n", " api_key=os.getenv(\"CUA_API_KEY\"),\n", - " verbosity=logging.INFO\n", + " verbosity=logging.INFO,\n", ")\n", "\n", - "agent_config[\"tools\"] = [ computer ]" + "agent_config[\"tools\"] = [computer]" ] }, { @@ -180,9 +197,7 @@ "# Create agent\n", "agent = ComputerAgent(**agent_config)\n", "\n", - "tasks = [\n", - " \"Open the web browser and search for a repository named trycua/cua on GitHub.\"\n", - "]\n", + "tasks = [\"Open the web browser and search for a repository named trycua/cua on GitHub.\"]\n", "\n", "for i, task in enumerate(tasks):\n", " print(f\"\\nExecuting task {i}/{len(tasks)}: {task}\")\n", @@ -209,7 +224,29 @@ "id": "6bf0887e", "metadata": {}, "outputs": [], - "source": "import uuid\nfrom pprint import pprint\nfrom agent.integrations.hud import run_full_dataset\n\njob_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n\n# Full dataset evaluation (runs via HUD's run_dataset under the hood)\n# See the documentation here: https://cua.ai/docs/agent-sdk/integrations/hud#running-a-full-dataset\nresults = await run_full_dataset(\n dataset=\"ddupont/OSWorld-Tiny-Public\",\n job_name=job_name,\n **agent_config,\n max_concurrent=20,\n max_steps=50,\n #split=\"train[:5]\"\n)\n\n# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\nprint(f\"Job: {job_name}\")\nprint(f\"Total results: {len(results)}\")\npprint(results[:3])" + "source": [ + "import uuid\n", + "from pprint import pprint\n", + "from agent.integrations.hud import run_full_dataset\n", + "\n", + "job_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n", + "\n", + "# Full dataset evaluation (runs via HUD's run_dataset under the hood)\n", + "# See the documentation here: 
https://cua.ai/docs/agent-sdk/integrations/hud#running-a-full-dataset\n", + "results = await run_full_dataset(\n", + " dataset=\"ddupont/OSWorld-Tiny-Public\",\n", + " job_name=job_name,\n", + " **agent_config,\n", + " max_concurrent=20,\n", + " max_steps=50,\n", + " # split=\"train[:5]\"\n", + ")\n", + "\n", + "# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", + "print(f\"Job: {job_name}\")\n", + "print(f\"Total results: {len(results)}\")\n", + "pprint(results[:3])" + ] }, { "cell_type": "markdown", diff --git a/samples/community/global-online/README.md b/samples/community/global-online/README.md deleted file mode 100644 index 82ddd44e..00000000 --- a/samples/community/global-online/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Global Online Hackathon Submission - -In construction πŸ— ️ diff --git a/samples/community/hack-the-north/README.md b/samples/community/hack-the-north/README.md deleted file mode 100644 index 5d4fef0c..00000000 --- a/samples/community/hack-the-north/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Hack the North Hackathon Submission - -In construction πŸ—οΈ diff --git a/scripts/install-cli.ps1 b/scripts/install-cli.ps1 new file mode 100644 index 00000000..d1c6e9ec --- /dev/null +++ b/scripts/install-cli.ps1 @@ -0,0 +1,162 @@ +# CUA CLI Installation Script for Windows +$ErrorActionPreference = "Stop" + +function Install-WithBun { + Write-Host "Installing CUA CLI using Bun..." -ForegroundColor Yellow + + # Check if bun is already installed + if (-not (Get-Command bun -ErrorAction SilentlyContinue)) { + Write-Host "Installing Bun..." -ForegroundColor Yellow + try { + powershell -c "irm bun.sh/install.ps1|iex" + + # Refresh environment variables + $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") + + # Add bun to PATH for this session if not already there + $bunPath = "$env:USERPROFILE\.bun\bin" + if ($env:Path -notlike "*$bunPath*") { + $env:Path = "$bunPath;$env:Path" + } + } catch { + Write-Host "Error: Failed to install Bun. Please install manually from https://bun.sh" -ForegroundColor Red + return $false + } + } + + # Verify bun installation + if (-not (Get-Command bun -ErrorAction SilentlyContinue)) { + Write-Host "Error: Bun installation failed. Please install manually from https://bun.sh" -ForegroundColor Red + return $false + } + + try { + bun add -g @trycua/cli + # Determine installed version from npm registry + try { + $bunVersion = (npm view @trycua/cli version) 2>$null + if (-not $bunVersion) { $bunVersion = "unknown" } + } catch { $bunVersion = "unknown" } + # Ensure install dir and write version file + $installDir = "$env:USERPROFILE\.cua\bin" + if (-not (Test-Path $installDir)) { New-Item -ItemType Directory -Path $installDir -Force | Out-Null } + Set-Content -Path (Join-Path $installDir ".version") -Value $bunVersion -NoNewline + return $true + } catch { + Write-Host "Warning: Failed to install with Bun, trying npm..." 
-ForegroundColor Yellow
+        try {
+            npm install -g @trycua/cli
+            # Determine installed version from npm registry
+            try {
+                $npmVersion = (npm view @trycua/cli version) 2>$null
+                if (-not $npmVersion) { $npmVersion = "unknown" }
+            } catch { $npmVersion = "unknown" }
+            # Ensure install dir and write version file
+            $installDir = "$env:USERPROFILE\.cua\bin"
+            if (-not (Test-Path $installDir)) { New-Item -ItemType Directory -Path $installDir -Force | Out-Null }
+            Set-Content -Path (Join-Path $installDir ".version") -Value $npmVersion -NoNewline
+            return $true
+        } catch {
+            Write-Host "Error: Installation failed with npm as well." -ForegroundColor Red
+            return $false
+        }
+    }
+}
+
+Write-Host "Installing CUA CLI..." -ForegroundColor Green
+
+# Determine if this is a 64-bit system
+$is64Bit = [Environment]::Is64BitOperatingSystem
+if (-not $is64Bit) {
+    Write-Host "Warning: 32-bit Windows is not supported. Falling back to Bun installation..." -ForegroundColor Yellow
+    if (Install-WithBun) {
+        exit 0
+    } else {
+        Write-Host "Error: Installation failed. Please try installing manually:" -ForegroundColor Red
+        Write-Host " irm https://cua.ai/install.ps1 | iex"
+        exit 1
+    }
+}
+
+# Get the latest release version
+try {
+    $release = Invoke-RestMethod -Uri "https://api.github.com/repos/trycua/cua/releases/latest" -ErrorAction Stop
+    $version = $release.tag_name -replace '^cua-v', ''
+    # Look for the windows binary in the release assets
+    $windowsAsset = $release.assets | Where-Object { $_.name -eq 'cua-windows-x64.exe' }
+
+    if (-not $windowsAsset) {
+        throw "Windows binary not found in release assets"
+    }
+
+    $binaryUrl = $windowsAsset.browser_download_url
+} catch {
+    Write-Host "Warning: Could not fetch latest release, falling back to Bun installation" -ForegroundColor Yellow
+    if (Install-WithBun) {
+        exit 0
+    } else {
+        Write-Host "Error: Installation failed. Please try installing manually:" -ForegroundColor Red
+        Write-Host " irm https://cua.ai/install.ps1 | iex"
+        exit 1
+    }
+}
+
+# Create installation directory
+$installDir = "$env:USERPROFILE\.cua\bin"
+if (-not (Test-Path $installDir)) {
+    New-Item -ItemType Directory -Path $installDir -Force | Out-Null
+}
+
+$binaryPath = Join-Path $installDir "cua.exe"
+
+# Download the binary
+Write-Host "Downloading CUA CLI $version for Windows x64..." -ForegroundColor Cyan
+try {
+    Invoke-WebRequest -Uri $binaryUrl -OutFile $binaryPath -ErrorAction Stop
+} catch {
+    Write-Host "Warning: Failed to download pre-built binary, falling back to Bun installation" -ForegroundColor Yellow
+    if (Install-WithBun) {
+        exit 0
+    } else {
+        Write-Host "Error: Installation failed. Please try installing manually:" -ForegroundColor Red
+        Write-Host " irm https://cua.ai/install.ps1 | iex"
+        exit 1
+    }
+}
+
+# Write version file for binary install
+try {
+    Set-Content -Path (Join-Path $installDir ".version") -Value $version -NoNewline
+} catch {
+    # Non-fatal
+}
+
+# Add to PATH if not already there
+$currentPath = [Environment]::GetEnvironmentVariable("Path", "User")
+if ($currentPath -notlike "*$installDir*") {
+    [Environment]::SetEnvironmentVariable("Path", "$currentPath;$installDir", "User")
+    $env:Path = "$env:Path;$installDir"
+    Write-Host "Success: Added $installDir to your PATH" -ForegroundColor Green
+}
+
+# Verify installation
+if (Test-Path $binaryPath) {
+    Write-Host "Success: CUA CLI $version installed successfully to $binaryPath" -ForegroundColor Green
+    Write-Host ""
+    Write-Host "Get started with:" -ForegroundColor Cyan
+    Write-Host " cua login"
+    Write-Host " cua create --os linux --configuration small --region north-america"
+    Write-Host ""
+    Write-Host "For more help, visit: https://docs.cua.ai/libraries/cua-cli" -ForegroundColor Cyan
+
+    # Offer to add to PATH if not already there
+    if (-not ($env:Path -like "*$installDir*")) {
+        Write-Host ""
+        Write-Host "Note: Please restart your terminal or run the following command to use CUA CLI:" -ForegroundColor Yellow
+        Write-Host " `$env:Path += ';$installDir'"
+    }
+} else {
+    Write-Host "Error: Installation failed. Please try installing manually:" -ForegroundColor Red
+    Write-Host " irm https://cua.ai/install.ps1 | iex"
+    exit 1
+}
\ No newline at end of file
diff --git a/scripts/install-cli.sh b/scripts/install-cli.sh
new file mode 100755
index 00000000..5edf06ee
--- /dev/null
+++ b/scripts/install-cli.sh
@@ -0,0 +1,210 @@
+#!/bin/bash
+set -e
+
+# CUA CLI Installation Script for macOS/Linux
+echo "πŸš€ Installing CUA CLI..."
+
+# Function to print success message
+print_success() {
+    local bin_path="$1"
+    local version="$2"
+    local config_file="$3"
+
+    printf "\033[32mβœ… CUA CLI %s was installed successfully to %s\033[0m\n" "$version" "$bin_path"
+    printf "\033[90mAdded \"%s\" to \$PATH in \"%s\"\033[0m\n" "$bin_path" "$config_file"
+    printf "\n\033[90mTo get started, run:\033[0m\n"
+    printf " source %s\n" "$config_file"
+    printf " cua --help\n"
+    printf "\033[90mπŸ“š For more help, visit: https://docs.cua.ai/libraries/cua-cli\033[0m\n"
+}
+
+# Function to install with bun as fallback
+install_with_bun() {
+    echo "πŸ“¦ Installing CUA CLI using Bun..."
+
+    # Check if bun is already installed
+    if ! command -v bun &> /dev/null; then
+        echo "πŸ“¦ Installing Bun..."
+        curl -fsSL https://bun.sh/install | bash
+
+        # Source the shell profile to make bun available
+        if [ -f "$HOME/.bashrc" ]; then
+            source "$HOME/.bashrc"
+        elif [ -f "$HOME/.zshrc" ]; then
+            source "$HOME/.zshrc"
+        fi
+
+        # Add bun to PATH for this session
+        export PATH="$HOME/.bun/bin:$PATH"
+    fi
+
+    # Verify bun installation
+    if ! command -v bun &> /dev/null; then
+        echo "❌ Failed to install Bun. Please install manually from https://bun.sh"
+        exit 1
+    fi
+
+    echo "πŸ“¦ Installing CUA CLI..."
+    if ! bun add -g @trycua/cli; then
+        echo "❌ Failed to install with Bun, trying npm..."
+        if ! npm install -g @trycua/cli; then
+            echo "❌ Installation failed. Please try installing manually:"
+            echo " npm install -g @trycua/cli"
+            exit 1
+        fi
+    fi
+
+    # Verify installation
+    if command -v cua &> /dev/null; then
+        # Determine which config file was updated
+        local config_file="$HOME/.bashrc"
+        if [ -f "$HOME/.zshrc" ]; then
+            config_file="$HOME/.zshrc"
+        elif [ -f "$HOME/.profile" ]; then
+            config_file="$HOME/.profile"
+        fi
+        # Determine installed version via npm registry (fallback to unknown)
+        local VERSION_BUN
+        VERSION_BUN=$(npm view @trycua/cli version 2>/dev/null || echo "unknown")
+        # Write version file to ~/.cua/bin/.version
+        local INSTALL_DIR="$HOME/.cua/bin"
+        mkdir -p "$INSTALL_DIR"
+        echo "$VERSION_BUN" > "$INSTALL_DIR/.version"
+        # Print success and exit
+        print_success "$(command -v cua)" "$VERSION_BUN" "$config_file"
+        exit 0
+    else
+        echo "❌ Installation failed. Please try installing manually:"
+        echo " npm install -g @trycua/cli"
+        exit 1
+    fi
+}
+
+# Determine OS and architecture
+OS=$(uname -s | tr '[:upper:]' '[:lower:]')
+ARCH=$(uname -m)
+
+# Map architecture to the format used in release assets
+case "$ARCH" in
+    x86_64) ARCH="x64" ;;
+    aarch64) ARCH="arm64" ;;
+    arm64) ARCH="arm64" ;;
+    *) ARCH="$ARCH" ;;
+esac
+
+# Determine the binary name
+BINARY_NAME="cua-${OS}-${ARCH}"
+if [ "$OS" = "darwin" ] && [ "$ARCH" = "arm64" ]; then
+    BINARY_NAME="cua-darwin-arm64"
+elif [ "$OS" = "darwin" ] && [ "$ARCH" = "x64" ]; then
+    BINARY_NAME="cua-darwin-x64"
+elif [ "$OS" = "linux" ] && [ "$ARCH" = "x64" ]; then
+    BINARY_NAME="cua-linux-x64"
+else
+    echo "⚠️ Pre-built binary not available for ${OS}-${ARCH}, falling back to Bun installation"
+    install_with_bun
+    exit 0
+fi
+
+# Get the latest release version
+LATEST_RELEASE=$(curl -s https://api.github.com/repos/trycua/cua/releases/latest)
+if [ -z "$LATEST_RELEASE" ]; then
+    echo "⚠️ Could not fetch latest release, falling back to Bun installation"
+    install_with_bun
+    exit 0
+fi
+
+# Extract version number (remove 'cua-v' prefix)
+TAG_NAME=$(echo "$LATEST_RELEASE" | grep 'tag_name' | cut -d '"' -f 4)
+VERSION=${TAG_NAME#cua-v}
+
+# Find the binary URL in the release assets
+BINARY_URL=$(echo "$LATEST_RELEASE" | grep -o 'https://.*/download/[^"]*/'${BINARY_NAME}'"' | head -1)
+BINARY_URL="${BINARY_URL%\"}"
+printf "\033[90mBINARY_URL: %s\033[0m\n" "$BINARY_URL"
+
+if [ -z "$BINARY_URL" ]; then
+    echo "⚠️ Could not find ${BINARY_NAME} in release assets, falling back to Bun installation"
+    install_with_bun
+    exit 0
+fi
+
+# Create ~/.cua/bin directory if it doesn't exist
+INSTALL_DIR="$HOME/.cua/bin"
+mkdir -p "$INSTALL_DIR"
+
+# Download the binary
+echo "πŸ“₯ Downloading CUA CLI $VERSION for ${OS}-${ARCH}..."
+echo "πŸ“ Downloading from: $BINARY_URL"
+
+# Download with progress bar and proper error handling
+if ! curl -L --progress-bar --fail "$BINARY_URL" -o "$INSTALL_DIR/cua"; then
+    echo "❌ Failed to download pre-built binary from $BINARY_URL"
+    echo "⚠️ Falling back to Bun installation"
+    install_with_bun
+    exit 0
+fi
+
+# Verify the downloaded file exists and has content
+if [ ! -f "$INSTALL_DIR/cua" ] || [ !
-s "$INSTALL_DIR/cua" ]; then
+    echo "❌ Downloaded file is missing or empty"
+    echo "⚠️ Falling back to Bun installation"
+    rm -f "$INSTALL_DIR/cua"
+    install_with_bun
+    exit 0
+fi
+
+# Check if the downloaded file looks like a binary (not HTML error page)
+if file "$INSTALL_DIR/cua" | grep -q "HTML\|text"; then
+    echo "❌ Downloaded file appears to be corrupted (HTML/text instead of binary)"
+    echo "⚠️ Falling back to Bun installation"
+    rm -f "$INSTALL_DIR/cua"
+    install_with_bun
+    exit 0
+fi
+
+# Make the binary executable
+chmod +x "$INSTALL_DIR/cua"
+
+# Write version file
+echo "$VERSION" > "$INSTALL_DIR/.version"
+
+# Add ~/.cua/bin to PATH if not already in PATH
+if [[ ":$PATH:" != *":$INSTALL_DIR:"* ]]; then
+    # Add to .bashrc, .zshrc, or .profile
+    if [ -f "$HOME/.bashrc" ]; then
+        echo "export PATH=\"$INSTALL_DIR:\$PATH\"" >> "$HOME/.bashrc"
+        echo "Added $INSTALL_DIR to PATH in ~/.bashrc"
+    fi
+
+    if [ -f "$HOME/.zshrc" ]; then
+        echo "export PATH=\"$INSTALL_DIR:\$PATH\"" >> "$HOME/.zshrc"
+        echo "Added $INSTALL_DIR to PATH in ~/.zshrc"
+    fi
+
+    if [ -f "$HOME/.profile" ] && [ ! -f "$HOME/.bashrc" ] && [ ! -f "$HOME/.zshrc" ]; then
+        echo "export PATH=\"$INSTALL_DIR:\$PATH\"" >> "$HOME/.profile"
+        echo "Added $INSTALL_DIR to PATH in ~/.profile"
+    fi
+
+    # Add to current session
+    export PATH="$INSTALL_DIR:$PATH"
+fi
+
+# Verify installation
+if command -v cua &> /dev/null; then
+    # Determine which config file was updated
+    config_file="$HOME/.bashrc"
+    if [ -f "$HOME/.zshrc" ]; then
+        config_file="$HOME/.zshrc"
+    elif [ -f "$HOME/.profile" ]; then
+        config_file="$HOME/.profile"
+    fi
+
+    print_success "$(which cua)" "$VERSION" "$config_file"
+    exit 0
+else
+    echo "❌ Installation failed. Please try installing manually:"
+    echo " curl -fsSL https://cua.ai/install.sh | sh"
+    exit 1
+fi
diff --git a/tests/agent_loop_testing/agent_test.py b/tests/agent_loop_testing/agent_test.py
index b31c8249..127282d1 100644
--- a/tests/agent_loop_testing/agent_test.py
+++ b/tests/agent_loop_testing/agent_test.py
@@ -184,7 +184,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Test CUA Agent with mock computer")
     parser.add_argument(
-        "--model", default="anthropic/claude-sonnet-4-20250514", help="CUA model to test"
+        "--model", default="anthropic/claude-sonnet-4-5-20250929", help="CUA model to test"
     )
     args = parser.parse_args()
diff --git a/uv.lock b/uv.lock
index 5c283383..0e26ddcc 100644
--- a/uv.lock
+++ b/uv.lock
@@ -861,7 +861,7 @@ wheels = [
 [[package]]
 name = "cua-agent"
-version = "0.4.37"
+version = "0.4.53"
 source = { editable = "libs/python/agent" }
 dependencies = [
     { name = "aiohttp" },
@@ -885,7 +885,6 @@ all = [
     { name = "einops" },
     { name = "google-genai" },
     { name = "gradio" },
-    { name = "hud-python" },
     { name = "mlx-vlm", marker = "sys_platform == 'darwin'" },
     { name = "pillow" },
     { name = "python-dotenv" },
@@ -975,7 +974,6 @@ requires-dist = [
     { name = "gradio", marker = "extra == 'all'", specifier = ">=5.23.3" },
     { name = "gradio", marker = "extra == 'ui'", specifier = ">=5.23.3" },
     { name = "httpx", specifier = ">=0.27.0" },
-    { name = "hud-python", marker = "extra == 'all'", specifier = "==0.4.52" },
     { name = "hud-python", marker = "extra == 'hud'", specifier = "==0.4.52" },
     { name = "litellm", specifier = ">=1.74.12" },
     { name = "mlx-vlm", marker = "sys_platform == 'darwin' and extra == 'all'", specifier = ">=0.1.27" },
@@ -1015,7 +1013,7 @@ provides-extras = ["openai", "anthropic", "qwen", "omni", "uitars", "uitars-mlx"
 [[package]]
 name =
"cua-computer" -version = "0.4.11" +version = "0.4.17" source = { editable = "libs/python/computer" } dependencies = [ { name = "aiohttp" }, @@ -1059,13 +1057,12 @@ provides-extras = ["lume", "lumier", "ui", "all"] [[package]] name = "cua-computer-server" -version = "0.1.28" +version = "0.1.30" source = { editable = "libs/python/computer-server" } dependencies = [ { name = "aiohttp" }, { name = "fastapi" }, { name = "pillow" }, - { name = "pip-system-certs", marker = "sys_platform == 'win32'" }, { name = "pyautogui" }, { name = "pydantic" }, { name = "pynput" }, @@ -1073,6 +1070,7 @@ dependencies = [ { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'" }, { name = "pyperclip" }, + { name = "python-certifi-win32", marker = "sys_platform == 'win32'" }, { name = "python-xlib", marker = "sys_platform == 'linux'" }, { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "pywinctl" }, @@ -1098,7 +1096,6 @@ requires-dist = [ { name = "aiohttp", specifier = ">=3.9.1" }, { name = "fastapi", specifier = ">=0.111.0" }, { name = "pillow", specifier = ">=10.2.0" }, - { name = "pip-system-certs", marker = "sys_platform == 'win32'" }, { name = "pyautogui", specifier = ">=0.9.54" }, { name = "pydantic", specifier = ">=2.0.0" }, { name = "pynput", specifier = ">=1.8.1" }, @@ -1109,6 +1106,7 @@ requires-dist = [ { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'", specifier = ">=10.1" }, { name = "pyobjc-framework-quartz", marker = "extra == 'macos'", specifier = ">=10.1" }, { name = "pyperclip", specifier = ">=1.9.0" }, + { name = "python-certifi-win32", marker = "sys_platform == 'win32'" }, { name = "python-xlib", marker = "sys_platform == 'linux'", specifier = ">=0.33" }, { name = "python-xlib", marker = "extra == 'linux'", specifier = ">=0.33" }, { name = "pywin32", marker = "sys_platform == 'win32'", specifier = ">=310" }, @@ -1847,6 +1845,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" }, + { url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, 
upload-time = "2025-11-04T12:42:17.175Z" }, { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" }, { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" }, @@ -1856,6 +1856,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" }, { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" }, { url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" }, + { url = "https://files.pythonhosted.org/packages/1c/53/f9c440463b3057485b8594d7a638bed53ba531165ef0ca0e6c364b5cc807/greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b", size = 1564759, upload-time = "2025-11-04T12:42:19.395Z" }, + { url = "https://files.pythonhosted.org/packages/47/e4/3bb4240abdd0a8d23f4f88adec746a3099f0d86bfedb623f063b2e3b4df0/greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929", size = 1634288, upload-time = "2025-11-04T12:42:21.174Z" }, { url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" }, ] @@ -4330,27 +4332,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, ] -[[package]] -name = "pip" -version = "25.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/6e/74a3f0179a4a73a53d66ce57fdb4de0080a8baa1de0063de206d6167acc2/pip-25.3.tar.gz", hash = 
"sha256:8d0538dbbd7babbd207f261ed969c65de439f6bc9e5dbd3b3b9a77f25d95f343", size = 1803014, upload-time = "2025-10-25T00:55:41.394Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/44/3c/d717024885424591d5376220b5e836c2d5293ce2011523c9de23ff7bf068/pip-25.3-py3-none-any.whl", hash = "sha256:9655943313a94722b7774661c21049070f6bbb0a1516bf02f7c8d5d9201514cd", size = 1778622, upload-time = "2025-10-25T00:55:39.247Z" }, -] - -[[package]] -name = "pip-system-certs" -version = "5.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pip", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/7d/6a/563b05a4f6c9ddc205c98bb413e74221368efb98b8fb9cca96b578b8930c/pip_system_certs-5.3.tar.gz", hash = "sha256:19c8bf9957bcce7d69c4dbc2d0b2ef13de1984d53f50a59012e6dbbad0af67c6", size = 6395, upload-time = "2025-10-16T06:14:55.217Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/57/752b63c609affae8f26ae0f1d1103d6ea7e707ad45943f62f7422936071d/pip_system_certs-5.3-py3-none-any.whl", hash = "sha256:3fbb5de62e374a99b688b1ad06e64ee5c4aeb633ef23e3a677d32e3e84fd863c", size = 6896, upload-time = "2025-10-16T06:14:54.072Z" }, -] - [[package]] name = "platformdirs" version = "4.5.0" @@ -7479,6 +7460,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/74/79/3323f08c98b9a5b726303b68babdd26cf4fe710709b7c61c96e6bb4f3d10/python_bidi-0.6.6-cp313-cp313-win_amd64.whl", hash = "sha256:63f7a9eaec31078e7611ab958b6e18e796c05b63ca50c1f7298311dc1e15ac3e", size = 159973, upload-time = "2025-02-18T21:43:10.431Z" }, ] +[[package]] +name = "python-certifi-win32" +version = "1.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi", marker = "sys_platform == 'win32'" }, + { name = "setuptools-scm", marker = "sys_platform == 'win32'" }, + { name = "wrapt", marker = "sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/c5/9c455ba848b14adce70c0176106fad190b7854acdc120cf9e72af7b9ac2d/python_certifi_win32-1.6.1-py2.py3-none-any.whl", hash = "sha256:508fd4fb1730cad2d9dada061df737650c8cfaa205d64657faa4cc6a55384402", size = 7256, upload-time = "2022-07-02T22:13:55.87Z" }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -8165,6 +8159,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, ] +[[package]] +name = "setuptools-scm" +version = "9.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging", marker = "sys_platform == 'win32'" }, + { name = "setuptools", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/b1/19587742aad604f1988a8a362e660e8c3ac03adccdb71c96d86526e5eb62/setuptools_scm-9.2.2.tar.gz", hash = "sha256:1c674ab4665686a0887d7e24c03ab25f24201c213e82ea689d2f3e169ef7ef57", size = 203385, upload-time = "2025-10-19T22:08:05.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/ea/ac2bf868899d0d2e82ef72d350d97a846110c709bacf2d968431576ca915/setuptools_scm-9.2.2-py3-none-any.whl", hash = "sha256:30e8f84d2ab1ba7cb0e653429b179395d0c33775d54807fc5f1dd6671801aef7", size = 62975, upload-time = "2025-10-19T22:08:04.007Z" }, +] + [[package]] name = "shapely" 
version = "2.1.2"