diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 92da20d7..10478cb9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,6 +51,6 @@ Documentation improvements are always welcome. You can: - Improve API documentation - Add tutorials or guides -For detailed instructions on setting up your development environment and submitting code contributions, please see our [Developer-Guide](./docs/Developer-Guide.md) guide. +For detailed instructions on setting up your development environment and submitting code contributions, please see our [Developer-Guide](./docs/Developer-Guide.md). Feel free to join our [Discord community](https://discord.com/invite/mVnXXpdE85) to discuss ideas or get help with your contributions. \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md index 3ae3445c..207f2665 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2025 trycua +Copyright (c) 2025 Cua AI, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 85cf88ce..af895a65 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,11 @@ trycua%2Fcua | Trendshift +> We’re hosting the **Computer-Use Agents SOTA Challenge** at [Hack the North](https://hackthenorth.com) and online! +>> **Track A (On-site @ UWaterloo)**: Reserved for participants accepted to Hack the North. πŸ† Prize: **YC interview guaranteed**. +>> **Track B (Remote)**: Open to everyone worldwide. πŸ† Prize: **Cash award**. +>>> πŸ‘‰ Sign up here: [trycua.com/hackathon](https://www.trycua.com/hackathon) + **cua** ("koo-ah") is Docker for [Computer-Use Agents](https://www.oneusefulthing.org/p/when-you-give-a-claude-a-mouse) - it enables AI agents to control full operating systems in virtual containers and deploy them locally or to the cloud.
@@ -47,7 +52,7 @@ Missing a model? [Raise a feature request](https://github.com/trycua/cua/issues/ - [Get started with a Computer-Use Agent UI](https://docs.trycua.com/docs/quickstart-ui) - [Get started with the Computer-Use Agent CLI](https://docs.trycua.com/docs/quickstart-cli) -- [Get Started with the Python SDKs](https://docs.trycua.com/docs/quickstart-devs) +- [Get started with the Python SDKs](https://docs.trycua.com/docs/quickstart-devs)
@@ -181,17 +186,31 @@ Join our [Discord community](https://discord.com/invite/mVnXXpdE85) to discuss i ## License -Cua is open-sourced under the MIT License - see the [LICENSE](LICENSE) file for details. +Cua is open-sourced under the MIT License - see the [LICENSE](LICENSE) file for details. + +The base image `kasmweb/core-ubuntu-jammy` is maintained by [Kasm Technologies](https://github.com/kasmtech/workspaces-core-images) and distributed under the Apache License 2.0. Usage of that image is subject to its own license terms. Microsoft's OmniParser, which is used in this project, is licensed under the Creative Commons Attribution 4.0 International License (CC-BY-4.0) - see the [OmniParser LICENSE](https://github.com/microsoft/OmniParser/blob/master/LICENSE) file for details. +### Third-Party Licenses and Optional Components + +Some optional extras for this project depend on third-party packages that are licensed under terms different from the MIT License. + +- The optional "omni" extra (installed via `pip install "cua-agent[omni]"`) installs the `cua-som` module, which includes `ultralytics` and is licensed under the AGPL-3.0. + +When you choose to install and use such optional extras, your use, modification, and distribution of those third-party components are governed by their respective licenses (e.g., AGPL-3.0 for `ultralytics`). + ## Contributing -We welcome contributions to CUA! Please refer to our [Contributing Guidelines](CONTRIBUTING.md) for details. +We welcome contributions to Cua! Please refer to our [Contributing Guidelines](CONTRIBUTING.md) for details. ## Trademarks -Apple, macOS, and Apple Silicon are trademarks of Apple Inc. Ubuntu and Canonical are registered trademarks of Canonical Ltd. Microsoft is a registered trademark of Microsoft Corporation. This project is not affiliated with, endorsed by, or sponsored by Apple Inc., Canonical Ltd., or Microsoft Corporation. +Apple, macOS, and Apple Silicon are trademarks of Apple Inc. 
+Ubuntu and Canonical are registered trademarks of Canonical Ltd. +Microsoft is a registered trademark of Microsoft Corporation. + +This project is not affiliated with, endorsed by, or sponsored by Apple Inc., Canonical Ltd., Microsoft Corporation, or Kasm Technologies. ## Stargazers diff --git a/docs/content/docs/agent-sdk/agent-loops.mdx b/docs/content/docs/agent-sdk/agent-loops.mdx index 0be4e009..6a18f064 100644 --- a/docs/content/docs/agent-sdk/agent-loops.mdx +++ b/docs/content/docs/agent-sdk/agent-loops.mdx @@ -22,7 +22,7 @@ agent = ComputerAgent( tools=[computer] ) -prompt = "open github, navigate to trycua/cua" +prompt = "Take a screenshot and tell me what you see" async for result in agent.run(prompt): if result["output"][-1]["type"] == "message": @@ -30,3 +30,142 @@ async for result in agent.run(prompt): ``` For a list of supported models and configurations, see the [Supported Agents](./supported-agents/computer-use-agents) page. + +### Response Format + +```python +{ + "output": [ + { + "type": "message", + "role": "assistant", + "content": [{"type": "output_text", "text": "I can see..."}] + }, + { + "type": "computer_call", + "action": {"type": "screenshot"}, + "call_id": "call_123" + }, + { + "type": "computer_call_output", + "call_id": "call_123", + "output": {"image_url": "data:image/png;base64,..."} + } + ], + "usage": { + "prompt_tokens": 150, + "completion_tokens": 75, + "total_tokens": 225, + "response_cost": 0.01, + } +} +``` + +### Environment Variables + +Use the following environment variables to configure the agent and its access to cloud computers and LLM providers: + +```bash +# Computer instance (cloud) +export CUA_CONTAINER_NAME="your-container-name" +export CUA_API_KEY="your-cua-api-key" + +# LLM API keys +export ANTHROPIC_API_KEY="your-anthropic-key" +export OPENAI_API_KEY="your-openai-key" +``` + +### Input and output + +The input prompt passed to `Agent.run` can either be a string or a list of message dictionaries: + +```python 
+messages = [ + { + "role": "user", + "content": "Take a screenshot and describe what you see" + }, + { + "role": "assistant", + "content": "I'll take a screenshot for you." + } +] +``` + +The output is an AsyncGenerator that yields response chunks. + +### Parameters + +The `ComputerAgent` constructor provides a wide range of options for customizing agent behavior, tool integration, callbacks, resource management, and more. + +- `model` (`str`): Default: **required** + The LLM or agent model to use. Determines which agent loop is selected unless `custom_loop` is provided. (e.g., "claude-3-5-sonnet-20241022", "computer-use-preview", "omni+vertex_ai/gemini-pro") +- `tools` (`List[Any]`): + List of tools the agent can use (e.g., `Computer`, sandboxed Python functions, etc.). +- `custom_loop` (`Callable`): + Optional custom agent loop function. If provided, overrides automatic loop selection. +- `only_n_most_recent_images` (`int`): + If set, only the N most recent images are kept in the message history. Useful for limiting memory usage. Automatically adds `ImageRetentionCallback`. +- `callbacks` (`List[Any]`): + List of callback instances for advanced preprocessing, postprocessing, logging, or custom hooks. See [Callbacks & Extensibility](#callbacks--extensibility). +- `verbosity` (`int`): + Logging level (e.g., `logging.INFO`). If set, adds a logging callback. +- `trajectory_dir` (`str`): + Directory path to save full trajectory data, including screenshots and responses. Adds `TrajectorySaverCallback`. +- `max_retries` (`int`): Default: `3` + Maximum number of retries for failed API calls (default: 3). +- `screenshot_delay` (`float` | `int`): Default: `0.5` + Delay (in seconds) before taking screenshots (default: 0.5). +- `use_prompt_caching` (`bool`): Default: `False` + Enables prompt caching for repeated prompts (mainly for Anthropic models). 
+- `max_trajectory_budget` (`float` | `dict`): + If set (float or dict), adds a budget manager callback that tracks usage costs and stops execution if the budget is exceeded. Dict allows advanced options (e.g., `{ "max_budget": 5.0, "raise_error": True }`). +- `**kwargs` (`any`): + Any additional keyword arguments are passed through to the agent loop or model provider. + +**Example with advanced options:** + +```python +from agent import ComputerAgent +from computer import Computer +from agent.callbacks import ImageRetentionCallback + +agent = ComputerAgent( + model="anthropic/claude-3-5-sonnet-20241022", + tools=[Computer(...)], + only_n_most_recent_images=3, + callbacks=[ImageRetentionCallback(only_n_most_recent_images=3)], + verbosity=logging.INFO, + trajectory_dir="trajectories", + max_retries=5, + screenshot_delay=1.0, + use_prompt_caching=True, + max_trajectory_budget={"max_budget": 5.0, "raise_error": True} +) +``` + +### Streaming Responses + +```python +async for result in agent.run(messages, stream=True): + # Process streaming chunks + for item in result["output"]: + if item["type"] == "message": + print(item["content"][0]["text"], end="", flush=True) + elif item["type"] == "computer_call": + action = item["action"] + print(f"\n[Action: {action['type']}]") +``` + +### Error Handling + +```python +try: + async for result in agent.run(messages): + # Process results + pass +except BudgetExceededException: + print("Budget limit exceeded") +except Exception as e: + print(f"Agent error: {e}") +``` \ No newline at end of file diff --git a/docs/content/docs/agent-sdk/benchmarks/index.mdx b/docs/content/docs/agent-sdk/benchmarks/index.mdx index 59e9b7ad..6397b2ec 100644 --- a/docs/content/docs/agent-sdk/benchmarks/index.mdx +++ b/docs/content/docs/agent-sdk/benchmarks/index.mdx @@ -9,9 +9,9 @@ The benchmark system evaluates models on GUI grounding tasks, specifically agent ## Available Benchmarks -- **[ScreenSpot-v2](./screenspot-v2)** - Standard resolution GUI 
grounding -- **[ScreenSpot-Pro](./screenspot-pro)** - High-resolution GUI grounding -- **[Interactive Testing](./interactive)** - Real-time testing and visualization +- **[ScreenSpot-v2](./benchmarks/screenspot-v2)** - Standard resolution GUI grounding +- **[ScreenSpot-Pro](./benchmarks/screenspot-pro)** - High-resolution GUI grounding +- **[Interactive Testing](./benchmarks/interactive)** - Real-time testing and visualization ## Quick Start diff --git a/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx b/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx index 4b75ebcf..494c4a8f 100644 --- a/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx +++ b/docs/content/docs/agent-sdk/callbacks/agent-lifecycle.mdx @@ -42,11 +42,4 @@ Called when responses are received from agent loop. - `on_screenshot(screenshot, name)` - When screenshots are taken ### 10. `on_run_end(kwargs, old_items, new_items)` -Called when agent run completes. Finalize tracking, save trajectories. - -## Built-in Callbacks - -- **ImageRetentionCallback**: Limits recent images in context -- **BudgetManagerCallback**: Stops execution when budget exceeded -- **TrajectorySaverCallback**: Saves conversation trajectories -- **LoggingCallback**: Logs agent activities +Called when agent run completes. Finalize tracking, save trajectories. 
\ No newline at end of file diff --git a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx index 9a543dc1..0787b1f6 100644 --- a/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx +++ b/docs/content/docs/agent-sdk/callbacks/cost-saving.mdx @@ -28,19 +28,23 @@ agent = ComputerAgent( ## Budget Manager Shorthand ```python +# Simple budget limit agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - max_trajectory_budget=5.0 # Auto-adds BudgetManagerCallback + max_trajectory_budget=5.0 # $5 limit ) ``` **Or with options:** ```python +# Advanced budget configuration agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - max_trajectory_budget={"max_budget": 5.0, "raise_error": True} + max_trajectory_budget={ + "max_budget": 10.0, + "raise_error": True, # Raise error when exceeded + "reset_after_each_run": False # Persistent across runs + } ) ``` diff --git a/docs/content/docs/agent-sdk/callbacks/index.mdx b/docs/content/docs/agent-sdk/callbacks/index.mdx new file mode 100644 index 00000000..590c236a --- /dev/null +++ b/docs/content/docs/agent-sdk/callbacks/index.mdx @@ -0,0 +1,64 @@ +--- +title: Callbacks +--- + +Callbacks in the Agent SDK provide hooks into the agent's lifecycle, allowing for custom functionality to be executed at various stages of an agent's run. They enable extensibility by allowing developers to integrate their own logic for tasks such as logging, cost management, and data anonymization. + +## Usage + +You can add preprocessing and postprocessing hooks using callbacks, or write your own by subclassing `AsyncCallbackHandler`. 
+
+### Built-in Callbacks
+
+Built-in callbacks can be used as follows:
+
+```python
+from agent.callbacks import (
+    ImageRetentionCallback,
+    TrajectorySaverCallback,
+    BudgetManagerCallback,
+    LoggingCallback
+)
+
+agent = ComputerAgent(
+    model="anthropic/claude-3-5-sonnet-20241022",
+    tools=[computer],
+    callbacks=[
+        ImageRetentionCallback(only_n_most_recent_images=3),
+        TrajectorySaverCallback(trajectory_dir="trajectories"),
+        BudgetManagerCallback(max_budget=10.0, raise_error=True),
+        LoggingCallback(level=logging.INFO)
+    ]
+)
+```
+
+The following built-in callbacks are available:
+
+- [BudgetManagerCallback](callbacks/cost-saving): Stops execution when budget exceeded
+- [LoggingCallback](callbacks/trajectories): Logs agent activities
+- **ImageRetentionCallback**: Limits recent images in context
+- **TrajectorySaverCallback**: Saves conversation trajectories
+- [PII Anonymization](callbacks/pii-anonymization)
+
+### Custom Callbacks
+
+Create custom callbacks using knowledge of the callback lifecycle as described in [Agent Lifecycle](callbacks/agent-lifecycle).
+ +```python +from agent.callbacks.base import AsyncCallbackHandler + +class CustomCallback(AsyncCallbackHandler): + async def on_llm_start(self, messages): + """Preprocess messages before LLM call""" + # Add custom preprocessing logic + return messages + + async def on_llm_end(self, messages): + """Postprocess messages after LLM call""" + # Add custom postprocessing logic + return messages + + async def on_usage(self, usage): + """Track usage information""" + print(f"Tokens used: {usage.total_tokens}") +``` diff --git a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx index 8799c5eb..8118f217 100644 --- a/docs/content/docs/agent-sdk/callbacks/trajectories.mdx +++ b/docs/content/docs/agent-sdk/callbacks/trajectories.mdx @@ -29,8 +29,8 @@ agent = ComputerAgent( ```python agent = ComputerAgent( model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - trajectory_dir="trajectories" # Auto-adds TrajectorySaverCallback + trajectory_dir="trajectories", # Auto-save trajectories + tools=[computer] ) ``` @@ -46,6 +46,12 @@ The viewer provides: ## Trajectory Structure +Trajectories are saved with: +- Complete conversation history +- Usage statistics and costs +- Timestamps and metadata +- Screenshots and computer actions + Each trajectory contains: - **metadata.json**: Run info, timestamps, usage stats (`total_tokens`, `response_cost`) - **turn_000/**: Turn-by-turn conversation history (api calls, responses, computer calls, screenshots) diff --git a/docs/content/docs/agent-sdk/sandboxed-tools.mdx b/docs/content/docs/agent-sdk/custom-tools.mdx similarity index 65% rename from docs/content/docs/agent-sdk/sandboxed-tools.mdx rename to docs/content/docs/agent-sdk/custom-tools.mdx index 8fafce7a..00847cf6 100644 --- a/docs/content/docs/agent-sdk/sandboxed-tools.mdx +++ b/docs/content/docs/agent-sdk/custom-tools.mdx @@ -1,11 +1,29 @@ --- -title: Sandboxed Tools -slug: sandboxed-tools +title: Custom Tools 
+slug: custom-tools --- The Agent SDK supports defining custom Python tools that run securely in sandboxed environments on remote Cua Computers. This enables safe execution of user-defined functions, isolation of dependencies, and robust automation workflows. -## Example: Defining a Sandboxed Tool +## Custom Tools + +Define a custom tool for an agent: + +```python +def calculate(a: int, b: int) -> int: + """Calculate the sum of two integers""" + return a + b + +# Use with agent +agent = ComputerAgent( + model="anthropic/claude-3-5-sonnet-20241022", + tools=[computer, calculate] +) +``` + +## Sandboxed Tools + +Define a sandboxed tool: ```python from computer.helpers import sandboxed diff --git a/docs/content/docs/agent-sdk/integrations/hud.mdx b/docs/content/docs/agent-sdk/integrations/hud.mdx index b517121e..3ad35878 100644 --- a/docs/content/docs/agent-sdk/integrations/hud.mdx +++ b/docs/content/docs/agent-sdk/integrations/hud.mdx @@ -3,47 +3,131 @@ title: HUD Evals description: Use ComputerAgent with HUD for benchmarking and evaluation --- -The HUD integration allows you to use ComputerAgent with the [HUD benchmarking framework](https://www.hud.so/), providing the same interface as existing HUD agents while leveraging ComputerAgent's capabilities. +The HUD integration allows an agent to be benchmarked using the [HUD framework](https://www.hud.so/). Through the HUD integration, the agent controls a computer inside HUD, where tests are run to evaluate the success of each task. 
## Installation +First, install the required package: + ```bash pip install "cua-agent[hud]" ## or install hud-python directly -# pip install hud-python==0.2.10 +# pip install hud-python==0.4.12 ``` -## Usage +## Environment Variables + +Before running any evaluations, you’ll need to set up your environment variables for HUD and your model providers: + +```bash +# HUD access +export HUD_API_KEY="your_hud_api_key" + +# Model provider keys (at least one required) +export OPENAI_API_KEY="your_openai_key" +export ANTHROPIC_API_KEY="your_anthropic_key" +``` + +## Running a Single Task + +You can run a single task from a HUD dataset for quick verification. + +### Example ```python -from agent.integrations.hud import run_job -from hud import load_taskset -from hud.taskset import TaskSet -import logging +from agent.integrations.hud import run_single_task -# Load taskset -taskset = await load_taskset("OSWorld-Verified") -taskset = TaskSet(tasks=taskset[:10]) # limit to 10 tasks instead of all 370 - -# Run benchmark job -job = await run_job( - model="openai/computer-use-preview", - # model="anthropic/claude-3-5-sonnet-20241022", - # model="huggingface-local/HelloKKMe/GTA1-7B+openai/gpt-5", - task_or_taskset=taskset, - job_name="test-computeragent-job", - max_concurrent_tasks=5, - # add any extra ComputerAgent kwargs: - verbosity=logging.INFO, # Enable logging - # trajectory_dir=".." # Save trajectories locally +await run_single_task( + dataset="hud-evals/OSWorld-Verified-XLang", # or another HUD dataset + model="openai/computer-use-preview+openai/gpt-5-nano", # any supported model string + task_id=155, # e.g., reopen last closed tab ) - -# Get results OR view them at app.hud.so -print(await job.get_analytics()) -print(f"View results at: https://app.hud.so/jobs/{job.id}") ``` -**Available Benchmarks:** -1. 
[OSWorld-Verified](/agent-sdk/benchmarks/osworld-verified) - Benchmark on OSWorld tasks +### Parameters -See the [HUD docs](https://docs.hud.so/environment-creation) for more eval environments. \ No newline at end of file +- `task_id` (`int`): Default: `0` + Index of the task to run from the dataset. + +## Running a Full Dataset + +To benchmark your agent at scale, you can run an entire dataset (or a subset) in parallel. + +### Example + +```python +from agent.integrations.hud import run_full_dataset + +results = await run_full_dataset( + dataset="hud-evals/OSWorld-Verified-XLang", # can also pass a Dataset or list[dict] + model="openai/computer-use-preview", + split="train[:3]", # try a few tasks to start + max_concurrent=20, # tune to your infra + max_steps=50 # safety cap per task +) +``` + +### Parameters + +- `job_name` (`str` | `None`): + Optional human-readable name for the evaluation job (shows up in HUD UI). +- `max_concurrent` (`int`): Default: `30` + Number of tasks to run in parallel. Scale this based on your infra. +- `max_steps` (`int`): Default: `50` + Safety cap on steps per task to prevent infinite loops. +- `split` (`str`): Default: `"train"` + Dataset split or subset (e.g., `"train[:10]"`). + +## Additional Parameters + +Both single-task and full-dataset runs share a common set of configuration options. These let you fine-tune how the evaluation runs. + +- `dataset` (`str` | `Dataset` | `list[dict]`): **Required** + HUD dataset name (e.g. `"hud-evals/OSWorld-Verified-XLang"`), a loaded `Dataset`, or a list of tasks. +- `model` (`str`): Default: `"computer-use-preview"` + Model string, e.g. `"openai/computer-use-preview+openai/gpt-5-nano"`. Supports composition with `+` (planning + grounding). +- `allowed_tools` (`list[str]`): Default: `["openai_computer"]` + Restrict which tools the agent may use. +- `tools` (`list[Any]`): + Extra tool configs to inject. +- `custom_loop` (`Callable`): + Optional custom agent loop function. 
If provided, overrides automatic loop selection. +- `only_n_most_recent_images` (`int`): Default: `5` for full dataset, `None` for single task. + Retain only the last N screenshots in memory. +- `callbacks` (`list[Any]`): + Hook functions for logging, telemetry, or side effects. +- `verbosity` (`int`): + Logging level. Set `2` for debugging every call/action. +- `trajectory_dir` (`str` | `dict`): + Save local copies of trajectories for replay/analysis. +- `max_retries` (`int`): Default: `3` + Number of retries for failed model/tool calls. +- `screenshot_delay` (`float` | `int`): Default: `0.5` + Delay (seconds) between screenshots to avoid race conditions. +- `use_prompt_caching` (`bool`): Default: `False` + Cache repeated prompts to reduce API calls. +- `max_trajectory_budget` (`float` | `dict`): + Limit on trajectory size/budget (e.g., tokens, steps). +- `telemetry_enabled` (`bool`): Default: `True` + Whether to send telemetry/traces to HUD. +- `**kwargs` (`any`): + Any additional keyword arguments are passed through to the agent loop or model provider. + +## Available Benchmarks + +HUD provides multiple benchmark datasets for realistic evaluation. + +1. **[OSWorld-Verified](/agent-sdk/benchmarks/osworld-verified)** – Benchmark on 369+ real-world desktop tasks across Chrome, LibreOffice, GIMP, VS Code, etc. + *Best for*: evaluating full computer-use agents in realistic environments. + *Verified variant*: fixes 300+ issues from earlier versions for reliability. + +**Coming soon:** SheetBench (spreadsheet automation) and other specialized HUD datasets. + +See the [HUD docs](https://docs.hud.so/environment-creation) for more eval environments. + +## Tips + +* **Debugging:** set `verbosity=2` to see every model call and tool action. +* **Performance:** lower `screenshot_delay` for faster runs; raise it if you see race conditions. +* **Safety:** always set `max_steps` (defaults to 50) to prevent runaway loops. 
+* **Custom tools:** pass extra `tools=[...]` into the agent config if you need beyond `openai_computer`. \ No newline at end of file diff --git a/docs/content/docs/agent-sdk/meta.json b/docs/content/docs/agent-sdk/meta.json index 5db33148..07bf7199 100644 --- a/docs/content/docs/agent-sdk/meta.json +++ b/docs/content/docs/agent-sdk/meta.json @@ -4,11 +4,11 @@ "pages": [ "agent-loops", "supported-agents", + "supported-model-providers", "chat-history", "callbacks", - "sandboxed-tools", + "custom-tools", "custom-computer-handlers", - "local-models", "prompt-caching", "usage-tracking", "benchmarks", diff --git a/docs/content/docs/agent-sdk/prompt-caching.mdx b/docs/content/docs/agent-sdk/prompt-caching.mdx index 5049b4bb..721895c5 100644 --- a/docs/content/docs/agent-sdk/prompt-caching.mdx +++ b/docs/content/docs/agent-sdk/prompt-caching.mdx @@ -25,7 +25,9 @@ agent = ComputerAgent( When using Anthropic-based CUAs (Claude models), setting `use_prompt_caching=True` will automatically add `{ "cache_control": "ephemeral" }` to your messages. This enables prompt caching for the session and can speed up repeated runs with the same prompt. -> **Note:** This argument is only required for Anthropic CUAs. For other providers, it is ignored. + +This argument is only required for Anthropic CUAs. For other providers, it is ignored. + ## OpenAI Provider diff --git a/docs/content/docs/agent-sdk/supported-agents/human-in-the-loop.mdx b/docs/content/docs/agent-sdk/supported-agents/human-in-the-loop.mdx index 8d084d7e..f1574891 100644 --- a/docs/content/docs/agent-sdk/supported-agents/human-in-the-loop.mdx +++ b/docs/content/docs/agent-sdk/supported-agents/human-in-the-loop.mdx @@ -62,5 +62,3 @@ The human-in-the-loop interface provides: - **Testing**: Validate agent, tool, and environment behavior manually --- - -For more details on the human tool implementation, see the [Human Tool Documentation](../../tools/human-tool). 
diff --git a/docs/content/docs/agent-sdk/supported-model-providers/index.mdx b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx new file mode 100644 index 00000000..68e372b1 --- /dev/null +++ b/docs/content/docs/agent-sdk/supported-model-providers/index.mdx @@ -0,0 +1,32 @@ +--- +title: Supported Model Providers +--- + +## Supported Models + +### Anthropic Claude (Computer Use API) +```python +model="anthropic/claude-3-5-sonnet-20241022" +model="anthropic/claude-3-7-sonnet-20250219" +model="anthropic/claude-opus-4-20250514" +model="anthropic/claude-sonnet-4-20250514" +``` + +### OpenAI Computer Use Preview +```python +model="openai/computer-use-preview" +``` + +### UI-TARS (Local or Huggingface Inference) +```python +model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B" +model="ollama_chat/0000/ui-tars-1.5-7b" +``` + +### Omniparser + Any LLM +```python +model="omniparser+ollama_chat/mistral-small3.2" +model="omniparser+vertex_ai/gemini-pro" +model="omniparser+anthropic/claude-3-5-sonnet-20241022" +model="omniparser+openai/gpt-4o" +``` \ No newline at end of file diff --git a/docs/content/docs/agent-sdk/local-models.mdx b/docs/content/docs/agent-sdk/supported-model-providers/local-models.mdx similarity index 100% rename from docs/content/docs/agent-sdk/local-models.mdx rename to docs/content/docs/agent-sdk/supported-model-providers/local-models.mdx diff --git a/docs/content/docs/agent-sdk/usage-tracking.mdx b/docs/content/docs/agent-sdk/usage-tracking.mdx index 54bbcaae..2709d738 100644 --- a/docs/content/docs/agent-sdk/usage-tracking.mdx +++ b/docs/content/docs/agent-sdk/usage-tracking.mdx @@ -56,7 +56,7 @@ agent = ComputerAgent( ) ``` -See also: [Budget Manager Callbacks](./callbacks#cost-saving) +See also: [Budget Manager Callbacks](./callbacks/cost-saving) ## See Also - [Prompt Caching](./prompt-caching) diff --git a/docs/content/docs/computer-sdk/commands.mdx b/docs/content/docs/computer-sdk/commands.mdx index 30657471..d8e80493 100644 --- 
a/docs/content/docs/computer-sdk/commands.mdx +++ b/docs/content/docs/computer-sdk/commands.mdx @@ -202,17 +202,17 @@ Direct file and directory manipulation: ```typescript - // File existence checks + # File existence checks await computer.interface.fileExists(path); // Check if file exists await computer.interface.directoryExists(path); // Check if directory exists - // File content operations + # File content operations await computer.interface.readText(path, "utf-8"); // Read file content await computer.interface.writeText(path, content, "utf-8"); // Write file content await computer.interface.readBytes(path); // Read file content as bytes await computer.interface.writeBytes(path, content); // Write file content as bytes - // File and directory management + # File and directory management await computer.interface.deleteFile(path); // Delete file await computer.interface.createDir(path); // Create directory await computer.interface.deleteDir(path); // Delete directory @@ -243,3 +243,38 @@ Access system accessibility information: ``` + +## Delay Configuration + +Control timing between actions: + + + + ```python + # Set default delay between all actions (in seconds) + computer.interface.delay = 0.5 # 500ms delay between actions + + # Or specify delay for individual actions + await computer.interface.left_click(x, y, delay=1.0) # 1 second delay after click + await computer.interface.type_text("Hello", delay=0.2) # 200ms delay after typing + await computer.interface.press_key("enter", delay=0.5) # 500ms delay after key press + ``` + + + + +## Python Virtual Environment Operations + +Manage Python environments: + + + + ```python + # Virtual environment management + await computer.venv_install("demo_venv", ["requests", "macos-pyxa"]) # Install packages in a virtual environment + await computer.venv_cmd("demo_venv", "python -c 'import requests; print(requests.get(`https://httpbin.org/ip`).json())'') # Run a shell command in a virtual environment + await 
computer.venv_exec("demo_venv", python_function_or_code, *args, **kwargs) # Run a Python function in a virtual environment and return the result / raise an exception + ``` + + + \ No newline at end of file diff --git a/docs/content/docs/computer-sdk/computer-ui.mdx b/docs/content/docs/computer-sdk/computer-ui.mdx new file mode 100644 index 00000000..22b131c0 --- /dev/null +++ b/docs/content/docs/computer-sdk/computer-ui.mdx @@ -0,0 +1,80 @@ +--- +title: Computer UI +--- + +The computer module includes a Gradio UI for creating and sharing demonstration data. We make it easy for people to build community datasets for better computer use models with an upload to Huggingface feature. + +```bash +# Install with UI support +pip install "cua-computer[ui]" +``` + + +For precise control of the computer, we recommend using VNC or Screen Sharing instead of the Computer Gradio UI. + + +### Building and Sharing Demonstrations with Huggingface + +Follow these steps to contribute your own demonstrations: + +#### 1. Set up Huggingface Access + +Set your HF_TOKEN in a .env file or in your environment variables: + +```bash +# In .env file +HF_TOKEN=your_huggingface_token +``` + +#### 2. Launch the Computer UI + +```python +# launch_ui.py +from computer.ui.gradio.app import create_gradio_ui +from dotenv import load_dotenv +load_dotenv('.env') + +app = create_gradio_ui() +app.launch(share=False) +``` + +For examples, see [Computer UI Examples](https://github.com/trycua/cua/tree/main/examples/computer_ui_examples.py) + +#### 3. Record Your Tasks + +
+View demonstration video + +
+ +Record yourself performing various computer tasks using the UI. + +#### 4. Save Your Demonstrations + +
+View demonstration video + +
+ +Save each task by picking a descriptive name and adding relevant tags (e.g., "office", "web-browsing", "coding"). + +#### 5. Record Additional Demonstrations + +Repeat steps 3 and 4 until you have a good amount of demonstrations covering different tasks and scenarios. + +#### 6. Upload to Huggingface + +
+View demonstration video + +
+ +Upload your dataset to Huggingface by: +- Naming it as `{your_username}/{dataset_name}` +- Choosing public or private visibility +- Optionally selecting specific tags to upload only tasks with certain tags + +#### Examples and Resources + +- Example Dataset: [ddupont/test-dataset](https://huggingface.co/datasets/ddupont/test-dataset) +- Find Community Datasets: πŸ” [Browse CUA Datasets on Huggingface](https://huggingface.co/datasets?other=cua) \ No newline at end of file diff --git a/docs/content/docs/computer-sdk/computers.mdx b/docs/content/docs/computer-sdk/computers.mdx index d2b2a3a1..9b920aee 100644 --- a/docs/content/docs/computer-sdk/computers.mdx +++ b/docs/content/docs/computer-sdk/computers.mdx @@ -109,7 +109,7 @@ cua provides local containers using different providers depending on your host o ```bash # Option 1: Pull from Docker Hub - docker pull trycua/cua-ubuntu:latest + docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest # Option 2: Build locally cd libs/kasm diff --git a/docs/content/docs/computer-sdk/meta.json b/docs/content/docs/computer-sdk/meta.json index f632538b..92e14612 100644 --- a/docs/content/docs/computer-sdk/meta.json +++ b/docs/content/docs/computer-sdk/meta.json @@ -4,6 +4,7 @@ "pages": [ "computers", "commands", + "computer-ui", "sandboxed-python" ] } diff --git a/docs/content/docs/computer-sdk/sandboxed-python.mdx b/docs/content/docs/computer-sdk/sandboxed-python.mdx index 1e7f6b78..5f1687bf 100644 --- a/docs/content/docs/computer-sdk/sandboxed-python.mdx +++ b/docs/content/docs/computer-sdk/sandboxed-python.mdx @@ -44,6 +44,32 @@ You can also install packages in the virtual environment using the `venv_install await my_computer.venv_install("myenv", ["requests"]) ``` +## Example: Interacting with macOS Applications + +You can use sandboxed functions to interact with macOS applications on a local Cua Computer (requires `os_type="darwin"`). This is particularly useful for automation tasks that involve GUI applications. 
+ +```python +# Example: Use sandboxed functions to execute code in a Cua Container +from computer.helpers import sandboxed + +await computer.venv_install("demo_venv", ["macos-pyxa"]) # Install packages in a virtual environment + +@sandboxed("demo_venv") +def greet_and_print(name): + """Get the HTML of the current Safari tab""" + import PyXA + safari = PyXA.Application("Safari") + html = safari.current_document.source() + print(f"Hello from inside the container, {name}!") + return {"greeted": name, "safari_html": html} + +# When a @sandboxed function is called, it will execute in the container +result = await greet_and_print("Cua") +# Result: {"greeted": "Cua", "safari_html": "..."} +# stdout and stderr are also captured and printed / raised +print("Result from sandboxed function:", result) +``` + ## Error Handling If the remote execution fails, the decorator will retry up to `max_retries` times. If all attempts fail, the last exception is raised locally. diff --git a/docs/content/docs/libraries/agent/index.mdx b/docs/content/docs/libraries/agent/index.mdx index f0e1ab77..6772d5c8 100644 --- a/docs/content/docs/libraries/agent/index.mdx +++ b/docs/content/docs/libraries/agent/index.mdx @@ -8,109 +8,14 @@ github: The Agent library provides the ComputerAgent class and tools for building AI agents that automate workflows on Cua Computers. -## Reference +## Agent Loops -### Basic Usage +See the [Agent Loops](../agent-sdk/agent-loops) documentation for how agents process information and take actions. -```python -from agent import ComputerAgent -from computer import Computer +## Chat History -computer = Computer() # Connect to a cua container -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer] -) +See the [Chat History](../agent-sdk/chat-history) documentation for managing conversational context and turn-by-turn interactions. 
-prompt = "open github, navigate to trycua/cua" +## Callbacks -async for result in agent.run(prompt): - print("Agent:", result["output"][-1]["content"][0]["text"]) -``` - ---- - -### ComputerAgent Constructor Options - -The `ComputerAgent` constructor provides a wide range of options for customizing agent behavior, tool integration, callbacks, resource management, and more. - -| Parameter | Type | Default | Description | -| --------------------------- | ----------------- | ------------ | ---------------------------------------------------------------------------------------------------- | -| `model` | `str` | **required** | Model name (e.g., "claude-3-5-sonnet-20241022", "computer-use-preview", "omni+vertex_ai/gemini-pro") | -| `tools` | `List[Any]` | `None` | List of tools (e.g., computer objects, decorated functions) | -| `custom_loop` | `Callable` | `None` | Custom agent loop function (overrides auto-selection) | -| `only_n_most_recent_images` | `int` | `None` | If set, only keep the N most recent images in message history (adds ImageRetentionCallback) | -| `callbacks` | `List[Any]` | `None` | List of AsyncCallbackHandler instances for preprocessing/postprocessing | -| `verbosity` | `int` | `None` | Logging level (`logging.DEBUG`, `logging.INFO`, etc.; adds LoggingCallback) | -| `trajectory_dir` | `str` | `None` | Directory to save trajectory data (adds TrajectorySaverCallback) | -| `max_retries` | `int` | `3` | Maximum number of retries for failed API calls | -| `screenshot_delay` | `float` \| `int` | `0.5` | Delay before screenshots (seconds) | -| `use_prompt_caching` | `bool` | `False` | Use prompt caching to avoid reprocessing the same prompt (mainly for Anthropic) | -| `max_trajectory_budget` | `float` \| `dict` | `None` | If set, adds BudgetManagerCallback to track usage costs and stop when budget is exceeded | -| `**kwargs` | _any_ | | Additional arguments passed to the agent loop | - -#### Parameter Details - -- **model**: The LLM or agent model to use. 
Determines which agent loop is selected unless `custom_loop` is provided. -- **tools**: List of tools the agent can use (e.g., `Computer`, sandboxed Python functions, etc.). -- **custom_loop**: Optional custom agent loop function. If provided, overrides automatic loop selection. -- **only_n_most_recent_images**: If set, only the N most recent images are kept in the message history. Useful for limiting memory usage. Automatically adds `ImageRetentionCallback`. -- **callbacks**: List of callback instances for advanced preprocessing, postprocessing, logging, or custom hooks. See [Callbacks & Extensibility](#callbacks--extensibility). -- **verbosity**: Logging level (e.g., `logging.INFO`). If set, adds a logging callback. -- **trajectory_dir**: Directory path to save full trajectory data, including screenshots and responses. Adds `TrajectorySaverCallback`. -- **max_retries**: Maximum number of retries for failed API calls (default: 3). -- **screenshot_delay**: Delay (in seconds) before taking screenshots (default: 0.5). -- **use_prompt_caching**: Enables prompt caching for repeated prompts (mainly for Anthropic models). -- **max_trajectory_budget**: If set (float or dict), adds a budget manager callback that tracks usage costs and stops execution if the budget is exceeded. Dict allows advanced options (e.g., `{ "max_budget": 5.0, "raise_error": True }`). -- **\*\*kwargs**: Any additional keyword arguments are passed through to the agent loop or model provider. 
- -**Example with advanced options:** - -```python -from agent import ComputerAgent -from computer import Computer -from agent.callbacks import ImageRetentionCallback - -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[Computer(...)], - only_n_most_recent_images=3, - callbacks=[ImageRetentionCallback(only_n_most_recent_images=3)], - verbosity=logging.INFO, - trajectory_dir="trajectories", - max_retries=5, - screenshot_delay=1.0, - use_prompt_caching=True, - max_trajectory_budget={"max_budget": 5.0, "raise_error": True} -) -``` - ---- - -### Message Array (Multi-turn) - -```python -messages = [ - {"role": "user", "content": "go to trycua on gh"}, - # ... (reasoning, computer_call, computer_call_output, etc) -] -async for result in agent.run(messages): - # Handle output, tool invocations, screenshots, etc. - print("Agent:", result["output"][-1]["content"][0]["text"]) - messages += result["output"] # Add agent output to message array - ... -``` - -### Callbacks & Extensibility - -You can add preprocessing and postprocessing hooks using callbacks, or write your own by subclassing `AsyncCallbackHandler`: - -```python -from agent.callbacks import ImageRetentionCallback, PIIAnonymizationCallback - -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - callbacks=[ImageRetentionCallback(only_n_most_recent_images=3)] -) -``` +See the [Callbacks](../agent-sdk/callbacks) documentation for extending and customizing agent behavior with custom hooks. 
diff --git a/docs/content/docs/libraries/computer/index.mdx b/docs/content/docs/libraries/computer/index.mdx index 4ac698d6..6638f878 100644 --- a/docs/content/docs/libraries/computer/index.mdx +++ b/docs/content/docs/libraries/computer/index.mdx @@ -8,202 +8,16 @@ github: - https://github.com/trycua/cua/tree/main/libs/typescript/computer --- -The Computer library provides a Computer class that can be used to control and automate a container running the Computer Server. +The Computer library provides a Computer class for controlling and automating containers running the Computer Server. -## Reference +## Connecting to Computers -### Basic Usage +See the [Cua Computers](../computer-sdk/computers) documentation for how to connect to different computer types (cloud, local, or host desktop). -Connect to a cua cloud container: +## Computer Commands - - - ```python - from computer import Computer +See the [Commands](../computer-sdk/commands) documentation for all supported commands and interface methods (Shell, Mouse, Keyboard, File System, etc.). 
- computer = Computer( - os_type="linux", - provider_type="cloud", - name="your-container-name", - api_key="your-api-key" - ) +## Sandboxed Python Functions - computer = await computer.run() # Connect to a cua cloud container - ``` - - - - ```typescript - import { Computer, OSType } from '@trycua/computer'; - - const computer = new Computer({ - osType: OSType.LINUX, - name: "your-container-name", - apiKey: "your-api-key" - }); - - await computer.run(); // Connect to a cua cloud container - ``` - - - - -Connect to a cua local container: - - - - ```python - from computer import Computer - - computer = Computer( - os_type="macos" - ) - - computer = await computer.run() # Connect to the container - ``` - - - - -### Interface Actions - - - - ```python - # Shell Actions - result = await computer.interface.run_command(cmd) # Run shell command - # result.stdout, result.stderr, result.returncode - - # Mouse Actions - await computer.interface.left_click(x, y) # Left click at coordinates - await computer.interface.right_click(x, y) # Right click at coordinates - await computer.interface.double_click(x, y) # Double click at coordinates - await computer.interface.move_cursor(x, y) # Move cursor to coordinates - await computer.interface.drag_to(x, y, duration) # Drag to coordinates - await computer.interface.get_cursor_position() # Get current cursor position - await computer.interface.mouse_down(x, y, button="left") # Press and hold a mouse button - await computer.interface.mouse_up(x, y, button="left") # Release a mouse button - - # Keyboard Actions - await computer.interface.type_text("Hello") # Type text - await computer.interface.press_key("enter") # Press a single key - await computer.interface.hotkey("command", "c") # Press key combination - await computer.interface.key_down("command") # Press and hold a key - await computer.interface.key_up("command") # Release a key - - # Scrolling Actions - await computer.interface.scroll(x, y) # Scroll the mouse wheel - await 
computer.interface.scroll_down(clicks) # Scroll down - await computer.interface.scroll_up(clicks) # Scroll up - - # Screen Actions - await computer.interface.screenshot() # Take a screenshot - await computer.interface.get_screen_size() # Get screen dimensions - - # Clipboard Actions - await computer.interface.set_clipboard(text) # Set clipboard content - await computer.interface.copy_to_clipboard() # Get clipboard content - - # File System Operations - await computer.interface.file_exists(path) # Check if file exists - await computer.interface.directory_exists(path) # Check if directory exists - await computer.interface.read_text(path, encoding="utf-8") # Read file content - await computer.interface.write_text(path, content, encoding="utf-8") # Write file content - await computer.interface.read_bytes(path) # Read file content as bytes - await computer.interface.write_bytes(path, content) # Write file content as bytes - await computer.interface.delete_file(path) # Delete file - await computer.interface.create_dir(path) # Create directory - await computer.interface.delete_dir(path) # Delete directory - await computer.interface.list_dir(path) # List directory contents - - # Accessibility - await computer.interface.get_accessibility_tree() # Get accessibility tree - - # Delay Configuration - # Set default delay between all actions (in seconds) - computer.interface.delay = 0.5 # 500ms delay between actions - - # Or specify delay for individual actions - await computer.interface.left_click(x, y, delay=1.0) # 1 second delay after click - await computer.interface.type_text("Hello", delay=0.2) # 200ms delay after typing - await computer.interface.press_key("enter", delay=0.5) # 500ms delay after key press - - # Python Virtual Environment Operations - await computer.venv_install("demo_venv", ["requests", "macos-pyxa"]) # Install packages in a virtual environment - await computer.venv_cmd("demo_venv", "python -c 'import requests; 
print(requests.get(`https://httpbin.org/ip`).json())'') # Run a shell command in a virtual environment - await computer.venv_exec("demo_venv", python_function_or_code, *args, **kwargs) # Run a Python function in a virtual environment and return the result / raise an exception - - # Example: Use sandboxed functions to execute code in a Cua Container - from computer.helpers import sandboxed - - @sandboxed("demo_venv") - def greet_and_print(name): - """Get the HTML of the current Safari tab""" - import PyXA - safari = PyXA.Application("Safari") - html = safari.current_document.source() - print(f"Hello from inside the container, {name}!") - return {"greeted": name, "safari_html": html} - - # When a @sandboxed function is called, it will execute in the container - result = await greet_and_print("Cua") - # Result: {"greeted": "Cua", "safari_html": "..."} - # stdout and stderr are also captured and printed / raised - print("Result from sandboxed function:", result) - ``` - - - - ```typescript - // Shell Actions - const result = await computer.interface.runCommand(cmd); // Run shell command - // result.stdout, result.stderr, result.returncode - - // Mouse Actions - await computer.interface.leftClick(x, y); // Left click at coordinates - await computer.interface.rightClick(x, y); // Right click at coordinates - await computer.interface.doubleClick(x, y); // Double click at coordinates - await computer.interface.moveCursor(x, y); // Move cursor to coordinates - await computer.interface.dragTo(x, y, duration); // Drag to coordinates - await computer.interface.getCursorPosition(); // Get current cursor position - await computer.interface.mouseDown(x, y, "left"); // Press and hold a mouse button - await computer.interface.mouseUp(x, y, "left"); // Release a mouse button - - // Keyboard Actions - await computer.interface.typeText("Hello"); // Type text - await computer.interface.pressKey("enter"); // Press a single key - await computer.interface.hotkey("command", "c"); // Press 
key combination - await computer.interface.keyDown("command"); // Press and hold a key - await computer.interface.keyUp("command"); // Release a key - - // Scrolling Actions - await computer.interface.scroll(x, y); // Scroll the mouse wheel - await computer.interface.scrollDown(clicks); // Scroll down - await computer.interface.scrollUp(clicks); // Scroll up - - // Screen Actions - await computer.interface.screenshot(); // Take a screenshot - await computer.interface.getScreenSize(); // Get screen dimensions - - // Clipboard Actions - await computer.interface.setClipboard(text); // Set clipboard content - await computer.interface.copyToClipboard(); // Get clipboard content - - // File System Operations - await computer.interface.fileExists(path); // Check if file exists - await computer.interface.directoryExists(path); // Check if directory exists - await computer.interface.readText(path, "utf-8"); // Read file content - await computer.interface.writeText(path, content, "utf-8"); // Write file content - await computer.interface.readBytes(path); // Read file content as bytes - await computer.interface.writeBytes(path, content); // Write file content as bytes - await computer.interface.deleteFile(path); // Delete file - await computer.interface.createDir(path); // Create directory - await computer.interface.deleteDir(path); // Delete directory - await computer.interface.listDir(path); // List directory contents - - // Accessibility - await computer.interface.getAccessibilityTree(); // Get accessibility tree - ``` - - - +See the [Sandboxed Python](../computer-sdk/sandboxed-python) documentation for running Python functions securely in isolated environments on a remote Cua Computer. 
\ No newline at end of file diff --git a/docs/content/docs/libraries/lume/cli-reference.mdx b/docs/content/docs/libraries/lume/cli-reference.mdx index da8bdee1..5afcc7fe 100644 --- a/docs/content/docs/libraries/lume/cli-reference.mdx +++ b/docs/content/docs/libraries/lume/cli-reference.mdx @@ -3,46 +3,7 @@ title: Lume CLI Reference description: Command Line Interface reference for Lume --- -Lume is a lightweight Command Line Interface and local API server for creating, running and managing **macOS and Linux virtual machines** with near-native performance on Apple Silicon, using Apple's [Virtualization.Framework](https://developer.apple.com/documentation/virtualization). - -## Quick Start - -Install and run a prebuilt macOS VM in two commands: - -```bash -# Install Lume -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" -# Pull & start a macOS image -lume run macos-sequoia-vanilla:latest -``` - -> **Security Note**: All prebuilt images use the default password `lume`. Change this immediately after your first login using the `passwd` command. - -**System Requirements**: -- Apple Silicon Mac (M1, M2, M3, etc.) -- macOS 13.0 or later -- At least 8GB of RAM (16GB recommended) -- At least 50GB of free disk space - -## Install - -Install with a single command: - -```bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" -``` - -By default, Lume is installed as a background service that starts automatically on login. 
If you prefer to start the Lume API service manually when needed, you can use the `--no-background-service` option: - -```bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh) --no-background-service" -``` - -> **Note:** With this option, you'll need to manually start the Lume API service by running `lume serve` in your terminal whenever you need to use tools or libraries that rely on the Lume API (such as the Computer-Use Agent). - -You can also download the `lume.pkg.tar.gz` archive from the [latest release](https://github.com/trycua/cua/releases?q=lume&expanded=true), extract it, and install the package manually. - -## Using Lume +import { Callout } from 'fumadocs-ui/components/callout'; Once installed, you can start using Lume with these common workflows: @@ -56,7 +17,9 @@ lume run macos-sequoia-vanilla:latest lume run ubuntu-noble-vanilla:latest ``` -> We provide [prebuilt VM images](#prebuilt-images) in our [ghcr registry](https://github.com/orgs/trycua/packages). + +We provide [prebuilt VM images](../lume/prebuilt-images) in our [ghcr registry](https://github.com/orgs/trycua/packages). + ### Create a Custom VM @@ -68,4 +31,179 @@ lume create my-macos-vm --cpu 4 --memory 8GB --disk-size 50GB lume create my-linux-vm --os linux --cpu 2 --memory 4GB ``` -> **Disk Space**: The actual disk space used by sparse images will be much lower than the logical size listed. You can resize VM disks after creation using `lume set --disk-size `. + +The actual disk space used by sparse images will be much lower than the logical size listed. You can resize VM disks after creation using `lume set --disk-size `. + + +## VM Management + + lume create <name> +Create a new macOS or Linux virtual machine. 
+
+**Options:**
+- `--os <os>` - Operating system to install (macOS or linux, default: macOS)
+- `--cpu <cores>` - Number of CPU cores (default: 4)
+- `--memory <size>` - Memory size, e.g., 8GB (default: 4GB)
+- `--disk-size <size>` - Disk size, e.g., 50GB (default: 40GB)
+- `--display <resolution>` - Display resolution (default: 1024x768)
+- `--ipsw <path>` - Path to IPSW file or 'latest' for macOS VMs
+- `--storage <name>` - VM storage location to use
+
+**Examples:**
+```bash
+# Create macOS VM with custom specs
+lume create my-mac --cpu 6 --memory 16GB --disk-size 100GB
+
+# Create Linux VM
+lume create my-ubuntu --os linux --cpu 2 --memory 8GB
+
+# Create macOS VM with latest IPSW
+lume create my-sequoia --ipsw latest
+```
+
+### lume run <name>
+Start and run a virtual machine.
+
+**Options:**
+- `--no-display` - Do not start the VNC client app
+- `--shared-dir <dir>` - Share directory with VM (format: path[:ro|rw])
+- `--mount <path>` - For Linux VMs only, attach a read-only disk image
+- `--registry <url>` - Container registry URL (default: ghcr.io)
+- `--organization <org>` - Organization to pull from (default: trycua)
+- `--vnc-port <port>` - Port to use for the VNC server (default: 0 for auto-assign)
+- `--recovery-mode <boolean>` - For macOS VMs only, start VM in recovery mode (default: false)
+- `--storage <name>` - VM storage location to use
+
+**Examples:**
+```bash
+# Run VM with shared directory
+lume run my-vm --shared-dir /path/to/share:rw
+
+# Run VM without display (headless)
+lume run my-vm --no-display
+
+# Run macOS VM in recovery mode
+lume run my-mac --recovery-mode true
+```
+
+### lume stop <name>
+Stop a running virtual machine.
+
+**Options:**
+- `--storage <name>` - VM storage location to use
+
+### lume delete <name>
+Delete a virtual machine and its associated files.
+
+**Options:**
+- `--force` - Force deletion without confirmation
+- `--storage <name>` - VM storage location to use
+
+### lume clone <name> <new-name>
+Create a copy of an existing virtual machine.
+
+**Options:**
+- `--source-storage <name>` - Source VM storage location
+- `--dest-storage <name>` - Destination VM storage location
+
+## VM Information and Configuration
+
+### lume ls
+List all virtual machines and their status.
+
+### lume get <name>
+Get detailed information about a specific virtual machine.
+
+**Options:**
+- `-f, --format <format>` - Output format (json|text)
+- `--storage <name>` - VM storage location to use
+
+### lume set <name>
+Modify virtual machine configuration.
+
+**Options:**
+- `--cpu <cores>` - New number of CPU cores (e.g., 4)
+- `--memory <size>` - New memory size (e.g., 8192MB or 8GB)
+- `--disk-size <size>` - New disk size (e.g., 40960MB or 40GB)
+- `--display <resolution>` - New display resolution in format WIDTHxHEIGHT (e.g., 1024x768)
+- `--storage <name>` - VM storage location to use
+
+**Examples:**
+```bash
+# Increase VM memory
+lume set my-vm --memory 16GB
+
+# Change display resolution
+lume set my-vm --display 1920x1080
+
+# Add more CPU cores
+lume set my-vm --cpu 8
+```
+
+## Image Management
+
+### lume images
+List available macOS images in local cache.
+
+### lume pull <image>
+Download a VM image from a container registry.
+
+**Options:**
+- `--registry <url>` - Container registry URL (default: ghcr.io)
+- `--organization <org>` - Organization to pull from (default: trycua)
+- `--storage <name>` - VM storage location to use
+
+### lume push <name> <image:tag>
+Upload a VM image to a container registry.
+
+**Options:**
+- `--additional-tags <tags>` - Additional tags to push the same image to
+- `--registry <url>` - Container registry URL (default: ghcr.io)
+- `--organization <org>` - Organization/user to push to (default: trycua)
+- `--storage <name>` - VM storage location to use
+- `--chunk-size-mb <size>` - Chunk size for disk image upload in MB (default: 512)
+- `--verbose` - Enable verbose logging
+- `--dry-run` - Prepare files and show plan without uploading
+- `--reassemble` - Verify integrity by reassembling chunks (requires --dry-run)
+
+### lume ipsw
+Get the latest macOS restore image URL.
+ +### lume prune +Remove cached images to free up disk space. + +## Configuration + +### lume config +Manage Lume configuration settings. + +**Subcommands:** + +##### Storage Management +- `lume config storage add ` - Add a new VM storage location +- `lume config storage remove ` - Remove a VM storage location +- `lume config storage list` - List all VM storage locations +- `lume config storage default ` - Set the default VM storage location + +##### Cache Management +- `lume config cache get` - Get current cache directory +- `lume config cache set ` - Set cache directory + +##### Image Caching +- `lume config caching get` - Show current caching status +- `lume config caching set ` - Enable or disable image caching + +## API Server + +### lume serve +Start the Lume API server for programmatic access. + +**Options:** +- `--port ` - Port to listen on (default: 7777) + +## Global Options + +These options are available for all commands: + +- `--help` - Show help information +- `--version` - Show version number \ No newline at end of file diff --git a/libs/lume/docs/FAQ.md b/docs/content/docs/libraries/lume/faq.md similarity index 99% rename from libs/lume/docs/FAQ.md rename to docs/content/docs/libraries/lume/faq.md index 21d0d287..98d6b766 100644 --- a/libs/lume/docs/FAQ.md +++ b/docs/content/docs/libraries/lume/faq.md @@ -1,4 +1,6 @@ -# FAQs +--- +title: FAQ +--- ### Where are the VMs stored? diff --git a/docs/content/docs/libraries/lume/http-api.mdx b/docs/content/docs/libraries/lume/http-api.mdx index 5191119c..04792f26 100644 --- a/docs/content/docs/libraries/lume/http-api.mdx +++ b/docs/content/docs/libraries/lume/http-api.mdx @@ -1,9 +1,10 @@ --- title: HTTP Server API -description: Lume exposes a local HTTP API server that listens at localhost for programatic management of VMs. +description: Lume exposes a local HTTP API server that listens at localhost for programmatic management of VMs. 
--- import { Tabs, Tab } from 'fumadocs-ui/components/tabs'; +import { Callout } from 'fumadocs-ui/components/callout'; ## Default URL @@ -19,11 +20,13 @@ http://localhost:7777 ## Endpoints +--- + ### Create VM Create a new virtual machine. -`POST: /vms` +`POST: /lume/vms` #### Parameters @@ -86,32 +89,34 @@ print(r.json()) ```typescript const payload = { - name: "lume_vm", - os: "macOS", + name: 'lume_vm', + os: 'macOS', cpu: 2, - memory: "4GB", - diskSize: "64GB", - display: "1024x768", - ipsw: "latest", - storage: "ssd" -} + memory: '4GB', + diskSize: '64GB', + display: '1024x768', + ipsw: 'latest', + storage: 'ssd', +}; const res = await fetch('http://localhost:7777/lume/vms', { - methdo: 'POST' - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload), + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload), }); -console.log(await res.json()) +console.log(await res.json()); ``` +--- + ### Run VM Run a virtual machine instance. -`POST: /vms/:name/run` +`POST: /lume/vms/:name/run` #### Parameters @@ -181,7 +186,7 @@ print(r.json()) ```typescript // Basic run -const res = await fetch('http://localhost:7777/lume/vms/my-vm-name/run', { +let res = await fetch('http://localhost:7777/lume/vms/my-vm-name/run', { method: 'POST', }); console.log(await res.json()); @@ -193,22 +198,24 @@ const payload = { recoveryMode: false, storage: 'ssd', }; -const res2 = await fetch('http://localhost:7777/lume/vms/lume_vm/run', { +res = await fetch('http://localhost:7777/lume/vms/lume_vm/run', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload), }); -console.log(await res2.json()); +console.log(await res.json()); ``` +--- + ### List VMs List all virtual machines. -`GET: /vms` +`GET: /lume/vms` #### Example Request @@ -263,11 +270,13 @@ console.log(await res.json()); ] ``` +--- + ### Get VM Details Get details for a specific virtual machine. 
-`GET: /vms/:name` +`GET: /lume/vms/:name` #### Parameters @@ -312,12 +321,12 @@ print(details.json()) ```typescript // Basic get -const res = await fetch('http://localhost:7777/lume/vms/lume_vm'); +let res = await fetch('http://localhost:7777/lume/vms/lume_vm'); console.log(await res.json()); // Get with specific storage -const res2 = await fetch('http://localhost:7777/lume/vms/lume_vm?storage=ssd'); -console.log(await res2.json()); +res = await fetch('http://localhost:7777/lume/vms/lume_vm?storage=ssd'); +console.log(await res.json()); ``` @@ -344,11 +353,13 @@ console.log(await res2.json()); } ``` +--- + ### Update VM Configuration Update the configuration of a virtual machine. -`PUT: /vms/:name` +`PATCH: /lume/vms/:name` #### Parameters @@ -368,7 +379,7 @@ Update the configuration of a virtual machine. ```bash curl --connect-timeout 6000 \ --max-time 5000 \ - -X PUT \ + -X PATCH \ -H "Content-Type: application/json" \ -d '{ "cpu": 4, @@ -393,7 +404,7 @@ payload = { "display": "1920x1080", "storage": "ssd" } -r = requests.put("http://localhost:7777/lume/vms/lume_vm", json=payload, timeout=50) +r = requests.patch("http://localhost:7777/lume/vms/lume_vm", json=payload, timeout=50) print(r.json()) ``` @@ -409,7 +420,7 @@ const payload = { storage: 'ssd', }; const res = await fetch('http://localhost:7777/lume/vms/lume_vm', { - method: 'PUT', + method: 'PATCH', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload), }); @@ -419,11 +430,19 @@ console.log(await res.json()); +--- + ### Stop VM Stop a running virtual machine. -`POST: /vms/:name/stop` +`POST: /lume/vms/:name/stop` + +#### Parameters + +| Name | Type | Required | Description | +| ------- | ------ | -------- | -------------------------- | +| storage | string | No | Storage type (`ssd`, etc.) | #### Example Request @@ -431,10 +450,17 @@ Stop a running virtual machine. 
```bash +# Basic stop curl --connect-timeout 6000 \ --max-time 5000 \ -X POST \ http://localhost:7777/lume/vms/lume_vm/stop + +# Stop with storage location specified +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + http://localhost:7777/lume/vms/lume_vm/stop?storage=ssd ``` @@ -443,15 +469,27 @@ curl --connect-timeout 6000 \ ```python import requests +# Basic stop r = requests.post("http://localhost:7777/lume/vms/lume_vm/stop", timeout=50) print(r.json()) + +# Stop with storage location specified +r = requests.post("http://localhost:7777/lume/vms/lume_vm/stop", params={"storage": "ssd"}, timeout=50) +print(r.json()) ``` ```typescript -const res = await fetch('http://localhost:7777/lume/vms/lume_vm/stop', { +// Basic stop +let res = await fetch('http://localhost:7777/lume/vms/lume_vm/stop', { + method: 'POST', +}); +console.log(await res.json()); + +// Stop with storage location specified +res = await fetch('http://localhost:7777/lume/vms/lume_vm/stop?storage=ssd', { method: 'POST', }); console.log(await res.json()); @@ -460,11 +498,13 @@ console.log(await res.json()); +--- + ### Delete VM Delete a virtual machine instance. -`DELETE: /vms/:name` +`DELETE: /lume/vms/:name` #### Parameters @@ -511,34 +551,110 @@ print(r.status_code) ```typescript // Basic delete -const res = await fetch('http://localhost:7777/lume/vms/lume_vm', { +let res = await fetch('http://localhost:7777/lume/vms/lume_vm', { method: 'DELETE', }); console.log(res.status); // Delete with specific storage -const res2 = await fetch('http://localhost:7777/lume/vms/lume_vm?storage=ssd', { +res = await fetch('http://localhost:7777/lume/vms/lume_vm?storage=ssd', { method: 'DELETE', }); -console.log(res2.status); +console.log(res.status); ``` +--- + +### Clone VM + +Clone an existing virtual machine. 
+ +`POST: /lume/vms/clone` + +#### Parameters + +| Name | Type | Required | Description | +| -------------- | ------ | -------- | ----------------------------------- | +| name | string | Yes | Source VM name | +| newName | string | Yes | New VM name | +| sourceLocation | string | No | Source storage location (`default`) | +| destLocation | string | No | Destination storage location | + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "name": "source-vm", + "newName": "cloned-vm", + "sourceLocation": "default", + "destLocation": "ssd" + }' \ + http://localhost:7777/lume/vms/clone +``` + + + + +```python +import requests + +payload = { + "name": "source-vm", + "newName": "cloned-vm", + "sourceLocation": "default", + "destLocation": "ssd" +} +r = requests.post("http://localhost:7777/lume/vms/clone", json=payload, timeout=50) +print(r.json()) +``` + + + + +```typescript +const payload = { + name: 'source-vm', + newName: 'cloned-vm', + sourceLocation: 'default', + destLocation: 'ssd', +}; +const res = await fetch('http://localhost:7777/lume/vms/clone', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload), +}); +console.log(await res.json()); +``` + + + + +--- + ### Pull VM Image Pull a VM image from a registry. -`POST: /images/pull` +`POST: /lume/pull` #### Parameters | Name | Type | Required | Description | | ------------ | ------ | -------- | ------------------------------------- | | image | string | Yes | Image name (e.g. `macos-sequoia-...`) | -| registry | string | Yes | Registry host (e.g. `ghcr.io`) | -| organization | string | Yes | Organization name | +| name | string | No | VM name for the pulled image | +| registry | string | No | Registry host (e.g. `ghcr.io`) | +| organization | string | No | Organization name | | storage | string | No | Storage type (`ssd`, etc.) 
| #### Example Request @@ -553,11 +669,12 @@ curl --connect-timeout 6000 \ -H "Content-Type: application/json" \ -d '{ "image": "macos-sequoia-vanilla:latest", + "name": "my-vm-name", "registry": "ghcr.io", "organization": "trycua", "storage": "ssd" }' \ - http://localhost:7777/lume/images/pull + http://localhost:7777/lume/pull ``` @@ -568,11 +685,12 @@ import requests payload = { "image": "macos-sequoia-vanilla:latest", + "name": "my-vm-name", "registry": "ghcr.io", "organization": "trycua", "storage": "ssd" } -r = requests.post("http://localhost:7777/lume/images/pull", json=payload, timeout=50) +r = requests.post("http://localhost:7777/lume/pull", json=payload, timeout=50) print(r.json()) ``` @@ -582,11 +700,12 @@ print(r.json()) ```typescript const payload = { image: 'macos-sequoia-vanilla:latest', + name: 'my-vm-name', registry: 'ghcr.io', organization: 'trycua', storage: 'ssd', }; -const res = await fetch('http://localhost:7777/lume/images/pull', { +const res = await fetch('http://localhost:7777/lume/pull', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload), @@ -596,3 +715,553 @@ console.log(await res.json()); + +--- + +### Push VM Image + +Push a VM to a registry as an image (asynchronous operation). + +`POST: /lume/vms/push` + +#### Parameters + +| Name | Type | Required | Description | +| ------------ | ------------ | -------- | ----------------------------------------------- | +| name | string | Yes | Local VM name to push | +| imageName | string | Yes | Image name in registry | +| tags | array | Yes | Image tags (e.g. `["latest", "v1"]`) | +| organization | string | Yes | Organization name | +| registry | string | No | Registry host (e.g. `ghcr.io`) | +| chunkSizeMb | integer | No | Chunk size in MB for upload | +| storage | string/null | No | Storage type (`ssd`, etc.) 
| + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "name": "my-local-vm", + "imageName": "my-image", + "tags": ["latest", "v1"], + "organization": "my-org", + "registry": "ghcr.io", + "chunkSizeMb": 512, + "storage": null + }' \ + http://localhost:7777/lume/vms/push +``` + + + + +```python +import requests + +payload = { + "name": "my-local-vm", + "imageName": "my-image", + "tags": ["latest", "v1"], + "organization": "my-org", + "registry": "ghcr.io", + "chunkSizeMb": 512, + "storage": None +} +r = requests.post("http://localhost:7777/lume/vms/push", json=payload, timeout=50) +print(r.json()) +``` + + + + +```typescript +const payload = { + name: 'my-local-vm', + imageName: 'my-image', + tags: ['latest', 'v1'], + organization: 'my-org', + registry: 'ghcr.io', + chunkSizeMb: 512, + storage: null, +}; +const res = await fetch('http://localhost:7777/lume/vms/push', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload), +}); +console.log(await res.json()); +``` + + + + +**Response (202 Accepted):** + +```json +{ + "message": "Push initiated in background", + "name": "my-local-vm", + "imageName": "my-image", + "tags": [ + "latest", + "v1" + ] +} +``` + +--- + +### List Images + +List available VM images. + +`GET: /lume/images` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + http://localhost:7777/lume/images +``` + + + + +```python +import requests + +r = requests.get("http://localhost:7777/lume/images", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/images'); +console.log(await res.json()); +``` + + + + +```json +{ + "local": [ + "macos-sequoia-xcode:latest", + "macos-sequoia-vanilla:latest" + ] +} +``` + +--- + +### Prune Images + +Remove unused VM images to free up disk space. 
+ +`POST: /lume/prune` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + http://localhost:7777/lume/prune +``` + + + + +```python +import requests + +r = requests.post("http://localhost:7777/lume/prune", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/prune', { + method: 'POST', +}); +console.log(await res.json()); +``` + + + + +--- + +### Get Latest IPSW URL + +Get the URL for the latest macOS IPSW file. + +`GET: /lume/ipsw` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + http://localhost:7777/lume/ipsw +``` + + + + +```python +import requests + +r = requests.get("http://localhost:7777/lume/ipsw", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/ipsw'); +console.log(await res.json()); +``` + + + + +--- + +## Configuration Management + +### Get Configuration + +Get current Lume configuration settings. + +`GET: /lume/config` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + http://localhost:7777/lume/config +``` + + + + +```python +import requests + +r = requests.get("http://localhost:7777/lume/config", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/config'); +console.log(await res.json()); +``` + + + + +```json +{ + "homeDirectory": "~/.lume", + "cacheDirectory": "~/.lume/cache", + "cachingEnabled": true +} +``` + +### Update Configuration + +Update Lume configuration settings. 
+ +`POST: /lume/config` + +#### Parameters + +| Name | Type | Required | Description | +| --------------- | ------- | -------- | -------------------------------- | +| homeDirectory | string | No | Lume home directory path | +| cacheDirectory | string | No | Cache directory path | +| cachingEnabled | boolean | No | Enable or disable caching | + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "homeDirectory": "~/custom/lume", + "cacheDirectory": "~/custom/lume/cache", + "cachingEnabled": true + }' \ + http://localhost:7777/lume/config +``` + + + + +```python +import requests + +payload = { + "homeDirectory": "~/custom/lume", + "cacheDirectory": "~/custom/lume/cache", + "cachingEnabled": True +} +r = requests.post("http://localhost:7777/lume/config", json=payload, timeout=50) +print(r.json()) +``` + + + + +```typescript +const payload = { + homeDirectory: '~/custom/lume', + cacheDirectory: '~/custom/lume/cache', + cachingEnabled: true, +}; +const res = await fetch('http://localhost:7777/lume/config', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload), +}); +console.log(await res.json()); +``` + + + + +--- + +## Storage Location Management + +### Get VM Storage Locations + +List all configured VM storage locations. 
+ +`GET: /lume/config/locations` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + http://localhost:7777/lume/config/locations +``` + + + + +```python +import requests + +r = requests.get("http://localhost:7777/lume/config/locations", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/config/locations'); +console.log(await res.json()); +``` + + + + +```json +[ + { + "name": "default", + "path": "~/.lume/vms", + "isDefault": true + }, + { + "name": "ssd", + "path": "/Volumes/SSD/lume/vms", + "isDefault": false + } +] +``` + +### Add VM Storage Location + +Add a new VM storage location. + +`POST: /lume/config/locations` + +#### Parameters + +| Name | Type | Required | Description | +| ---- | ------ | -------- | ---------------------------- | +| name | string | Yes | Storage location name | +| path | string | Yes | File system path for storage | + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "name": "ssd", + "path": "/Volumes/SSD/lume/vms" + }' \ + http://localhost:7777/lume/config/locations +``` + + + + +```python +import requests + +payload = { + "name": "ssd", + "path": "/Volumes/SSD/lume/vms" +} +r = requests.post("http://localhost:7777/lume/config/locations", json=payload, timeout=50) +print(r.json()) +``` + + + + +```typescript +const payload = { + name: 'ssd', + path: '/Volumes/SSD/lume/vms', +}; +const res = await fetch('http://localhost:7777/lume/config/locations', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload), +}); +console.log(await res.json()); +``` + + + + +### Remove VM Storage Location + +Remove a VM storage location. 
+ +`DELETE: /lume/config/locations/:name` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X DELETE \ + http://localhost:7777/lume/config/locations/ssd +``` + + + + +```python +import requests + +r = requests.delete("http://localhost:7777/lume/config/locations/ssd", timeout=50) +print(r.status_code) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/config/locations/ssd', { + method: 'DELETE', +}); +console.log(res.status); +``` + + + + +### Set Default VM Storage Location + +Set a storage location as the default. + +`POST: /lume/config/locations/default/:name` + +#### Example Request + + + + +```bash +curl --connect-timeout 6000 \ + --max-time 5000 \ + -X POST \ + http://localhost:7777/lume/config/locations/default/ssd +``` + + + + +```python +import requests + +r = requests.post("http://localhost:7777/lume/config/locations/default/ssd", timeout=50) +print(r.json()) +``` + + + + +```typescript +const res = await fetch('http://localhost:7777/lume/config/locations/default/ssd', { + method: 'POST', +}); +console.log(await res.json()); +``` + + + diff --git a/docs/content/docs/libraries/lume/index.mdx b/docs/content/docs/libraries/lume/index.mdx index 28080bff..d62c80e0 100644 --- a/docs/content/docs/libraries/lume/index.mdx +++ b/docs/content/docs/libraries/lume/index.mdx @@ -5,6 +5,4 @@ github: - https://github.com/trycua/cua/tree/main/libs/lume --- -## Overview - -The Lume CLI provides command line tools for managing virtual machines with Lume. +Lume is a lightweight Command Line Interface and local API server for creating, running and managing **macOS and Linux virtual machines** with near-native performance on Apple Silicon, using Apple's [Virtualization.Framework](https://developer.apple.com/documentation/virtualization). 
\ No newline at end of file diff --git a/docs/content/docs/libraries/lume/installation.mdx b/docs/content/docs/libraries/lume/installation.mdx new file mode 100644 index 00000000..161e48e0 --- /dev/null +++ b/docs/content/docs/libraries/lume/installation.mdx @@ -0,0 +1,47 @@ +--- +title: Installation +description: Installation instructions for the current version of the Lume CLI. +--- + +## Quickstart + +Install and run a prebuilt macOS VM in two commands: + +```bash +# Install Lume +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" +# Pull & start a macOS image +lume run macos-sequoia-vanilla:latest +``` + + +All prebuilt images use the default password `lume`. Change this immediately after your first login using the `passwd` command. + + +**System Requirements**: +- Apple Silicon Mac (M1, M2, M3, etc.) +- macOS 13.0 or later +- At least 8GB of RAM (16GB recommended) +- At least 50GB of free disk space + +## Install with Script + +Install with a single command: + +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" +``` + +### Manual Start (No Background Service) +By default, Lume is installed as a background service that starts automatically on login. If you prefer to start the Lume API service manually when needed, you can use the `--no-background-service` option: + +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh) --no-background-service" +``` + + +With this option, you'll need to manually start the Lume API service by running `lume serve` in your terminal whenever you need to use tools or libraries that rely on the Lume API (such as the Computer-Use Agent). 
+ + +## Manual Download and Installation +You can also download the `lume.pkg.tar.gz` archive from the [latest release](https://github.com/trycua/cua/releases?q=lume&expanded=true), extract it, and install the package manually. \ No newline at end of file diff --git a/docs/content/docs/libraries/lume/meta.json b/docs/content/docs/libraries/lume/meta.json new file mode 100644 index 00000000..5f4d907a --- /dev/null +++ b/docs/content/docs/libraries/lume/meta.json @@ -0,0 +1,9 @@ +{ + "pages": [ + "installation", + "prebuilt-images", + "cli-reference", + "http-api", + "faq" + ] +} diff --git a/docs/content/docs/libraries/lume/prebuilt-images.mdx b/docs/content/docs/libraries/lume/prebuilt-images.mdx new file mode 100644 index 00000000..0120af43 --- /dev/null +++ b/docs/content/docs/libraries/lume/prebuilt-images.mdx @@ -0,0 +1,20 @@ +--- +title: Prebuilt Images +--- + +Pre-built images are available in the registry [ghcr.io/trycua](https://github.com/orgs/trycua/packages). + +**Important Note (v0.2.0+):** Images are being re-uploaded with sparse file system optimizations enabled, resulting in significantly lower actual disk usage. Older images (without the `-sparse` suffix) are now **deprecated**. The last version of `lume` fully supporting the non-sparse images was `v0.1.x`. Starting from `v0.2.0`, lume will automatically pull images optimized with sparse file system support. + +These images come with an SSH server pre-configured and auto-login enabled. + +For the security of your VM, change the default password `lume` immediately after your first login. 
+
+| Image | Tag | Description | Logical Size |
+|-------|------------|-------------|------|
+| `macos-sequoia-vanilla` | `latest`, `15.2` | macOS Sequoia 15.2 image | 20GB |
+| `macos-sequoia-xcode` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 22GB |
+| `macos-sequoia-cua` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 24GB |
+| `ubuntu-noble-vanilla` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB |
+
+For additional disk space, resize the VM disk after pulling the image using the `lume set <vm-name> --disk-size <size>` command. Note that the actual disk space used by sparse images will be much lower than the logical size listed.
\ No newline at end of file
diff --git a/docs/content/docs/libraries/lumier/building-lumier.mdx b/docs/content/docs/libraries/lumier/building-lumier.mdx
new file mode 100644
index 00000000..df8ad4f8
--- /dev/null
+++ b/docs/content/docs/libraries/lumier/building-lumier.mdx
@@ -0,0 +1,42 @@
+---
+title: Building Lumier
+---
+
+If you want to customize the Lumier container or build it from source, you can follow these steps:
+
+```bash
+# 1. Navigate to the Lumier directory
+cd libs/lumier
+
+# 2. Build the Docker image locally
+docker build -t lumier-custom:latest .
+
+# 3. Run your custom build
+docker run -it --rm \
+  --name lumier-vm \
+  -p 8006:8006 \
+  -e VM_NAME=lumier-vm \
+  -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \
+  -e CPU_CORES=4 \
+  -e RAM_SIZE=8192 \
+  lumier-custom:latest
+```
+
+### Customization Options
+
+The Dockerfile provides several customization points:
+
+1. **Base image**: The container uses Debian Bullseye Slim as the base. You can modify this if needed.
+2. **Installed packages**: You can add or remove packages in the apt-get install list.
+3. **Hooks**: Check the `/run/hooks/` directory for scripts that run at specific points during VM lifecycle.
+4. 
**Configuration**: Review `/run/config/constants.sh` for default settings. + +After making your modifications, you can build and push your custom image to your own Docker Hub repository: + +```bash +# Build with a custom tag +docker build -t yourusername/lumier:custom . + +# Push to Docker Hub (after docker login) +docker push yourusername/lumier:custom +``` \ No newline at end of file diff --git a/docs/content/docs/libraries/lumier/docker-compose.mdx b/docs/content/docs/libraries/lumier/docker-compose.mdx new file mode 100644 index 00000000..fece3473 --- /dev/null +++ b/docs/content/docs/libraries/lumier/docker-compose.mdx @@ -0,0 +1,45 @@ +--- +title: Docker Compose +--- + +You can also use Docker Compose to run Lumier with a simple configuration file. Create a `docker-compose.yml` file with the following content: + +```yaml +version: '3' + +services: + lumier: + image: trycua/lumier:latest + container_name: lumier-vm + restart: unless-stopped + ports: + - "8006:8006" # Port for VNC access + volumes: + - ./storage:/storage # VM persistent storage + - ./shared:/shared # Shared folder accessible in the VM + environment: + - VM_NAME=lumier-vm + - VERSION=ghcr.io/trycua/macos-sequoia-cua:latest + - CPU_CORES=4 + - RAM_SIZE=8192 + - HOST_STORAGE_PATH=${PWD}/storage + - HOST_SHARED_PATH=${PWD}/shared + stop_signal: SIGINT + stop_grace_period: 2m +``` + +Then run Lumier using: + +```bash +# First create the required directories +mkdir -p storage shared + +# Start the container +docker-compose up -d + +# View the logs +docker-compose logs -f + +# Stop the container when done +docker-compose down +``` diff --git a/docs/content/docs/libraries/lumier/docker.mdx b/docs/content/docs/libraries/lumier/docker.mdx new file mode 100644 index 00000000..4ecd15d6 --- /dev/null +++ b/docs/content/docs/libraries/lumier/docker.mdx @@ -0,0 +1,124 @@ +--- +title: Docker +--- + +You can use Lumier through Docker: + +### Run a macOS VM (ephemeral) +```bash +# Run the container with 
temporary storage (using pre-built image from Docker Hub) +docker run -it --rm \ + --name macos-vm \ + -p 8006:8006 \ + -e VM_NAME=macos-vm \ + -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ + -e CPU_CORES=4 \ + -e RAM_SIZE=8192 \ + trycua/lumier:latest +``` +Access the VM in your browser at [http://localhost:8006](http://localhost:8006). + +After running the command above, you can access your macOS VM through a web browser (e.g., http://localhost:8006). + + +With the basic setup above, your VM will be reset when you stop the container (ephemeral mode). This means any changes you make inside the macOS VM will be lost. See the section below for how to save your VM state. + + +## Saving Your VM State + +To save your VM state between sessions (so your changes persist when you stop and restart the container), you'll need to set up a storage location: + +```bash +# First, create a storage directory if it doesn't exist +mkdir -p storage + +# Then run the container with persistent storage +docker run -it --rm \ + --name lumier-vm \ + -p 8006:8006 \ + -v $(pwd)/storage:/storage \ + -e VM_NAME=lumier-vm \ + -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ + -e CPU_CORES=4 \ + -e RAM_SIZE=8192 \ + -e HOST_STORAGE_PATH=$(pwd)/storage \ + trycua/lumier:latest +``` + +This command creates a connection between a folder on your Mac (`$(pwd)/storage`) and a folder inside the Docker container (`/storage`). The `-v` flag (volume mount) and the `HOST_STORAGE_PATH` variable work together to ensure your VM data is saved on your host Mac. 
+ +## Sharing Files with Your VM + +To share files between your Mac and the virtual machine, you can set up a shared folder: + +```bash +# Create both storage and shared folders +mkdir -p storage shared + +# Run with both persistent storage and a shared folder +docker run -it --rm \ + --name lumier-vm \ + -p 8006:8006 \ + -v $(pwd)/storage:/storage \ + -v $(pwd)/shared:/shared \ + -e VM_NAME=lumier-vm \ + -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ + -e CPU_CORES=4 \ + -e RAM_SIZE=8192 \ + -e HOST_STORAGE_PATH=$(pwd)/storage \ + -e HOST_SHARED_PATH=$(pwd)/shared \ + trycua/lumier:latest +``` + +With this setup, any files you place in the `shared` folder on your Mac will be accessible from within the macOS VM, and vice versa. + +## Automating VM Startup with on-logon.sh + +You can automatically run scripts when the VM starts up by placing an `on-logon.sh` script in the shared folder's lifecycle directory. This is useful for setting up your VM environment each time it starts. + +```bash +# Create the lifecycle directory in your shared folder +mkdir -p shared/lifecycle + +# Create a sample on-logon.sh script +cat > shared/lifecycle/on-logon.sh << 'EOF' +#!/usr/bin/env bash + +# Create a file on the desktop +echo "Hello from Lumier!" > /Users/lume/Desktop/hello_lume.txt + +# You can add more commands to execute at VM startup +# For example: +# - Configure environment variables +# - Start applications +# - Mount network drives +# - Set up development environments +EOF + +# Make the script executable +chmod +x shared/lifecycle/on-logon.sh +``` + +The script will be automatically executed when the VM starts up. It runs in the VM context and has access to: + +- The `/Users/lume` user directory (home directory in the VM) +- The shared folder at `/Volumes/My Shared Files` inside the VM +- Any resources available to the VM + +This feature enables automation of VM setup without modifying the base VM image. 
+ +## Configuration Options + +When running Lumier, you'll need to configure a few things: + +- **Port forwarding** (`-p 8006:8006`): Makes the VM's VNC interface accessible in your browser. If port 8006 is already in use, you can use a different port like `-p 8007:8006`. + +- **Environment variables** (`-e`): Configure your VM settings: + - `VM_NAME`: A name for your virtual machine + - `VERSION`: The macOS image to use + - `CPU_CORES`: Number of CPU cores to allocate + - `RAM_SIZE`: Memory in MB to allocate + - `HOST_STORAGE_PATH`: Path to save VM state (when using persistent storage) + - `HOST_SHARED_PATH`: Path to the shared folder (optional) + +- **Background service**: The `lume serve` service should be running on your host (starts automatically when you install Lume using the `install.sh` script above). \ No newline at end of file diff --git a/docs/content/docs/libraries/lumier/index.mdx b/docs/content/docs/libraries/lumier/index.mdx index 3858504d..814055ba 100644 --- a/docs/content/docs/libraries/lumier/index.mdx +++ b/docs/content/docs/libraries/lumier/index.mdx @@ -5,65 +5,22 @@ github: - https://github.com/trycua/cua/tree/main/libs/lumier --- -## Overview +**Lumier** is an interface for running macOS virtual machines with minimal setup. It uses Docker as a packaging system to deliver a pre-configured environment that connects to the `lume` virtualization service running on your host machine. With Lumier, you get: -The Lumier library provides a Docker-based interface for creating performant macOS virtual machines. 
+- A ready-to-use macOS or Linux virtual machine in minutes +- Browser-based VNC access to your VM +- Easy file sharing between your host and VM +- Simple configuration through environment variables -## Installation +## How It Works -**Requirements:** -- Docker for Apple Silicon (or compatible Mac) -- Lume virtualization CLI (install with: `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)"`) + +We're using Docker primarily as a convenient delivery mechanism, not as an isolation layer. Unlike traditional Docker containers, Lumier leverages the Apple Virtualization Framework (Apple Vz) through the `lume` CLI to create true virtual machines. + -## Usage +Here's what's happening behind the scenes: -### Run a macOS VM (ephemeral) -```bash -docker run -it --rm \ - --name macos-vm \ - -p 8006:8006 \ - -e VM_NAME=macos-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - trycua/lumier:latest -``` -Access the VM in your browser at [http://localhost:8006](http://localhost:8006). - -### Persistent Storage -```bash -mkdir -p storage - -docker run -it --rm \ - --name lumier-vm \ - -p 8006:8006 \ - -v $(pwd)/storage:/storage \ - -e VM_NAME=lumier-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - -e HOST_STORAGE_PATH=$(pwd)/storage \ - trycua/lumier:latest -``` - -### Shared Folder -```bash -mkdir -p shared - -docker run -it --rm \ - --name lumier-vm \ - -p 8006:8006 \ - -v $(pwd)/storage:/storage \ - -v $(pwd)/shared:/shared \ - -e VM_NAME=lumier-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - -e HOST_STORAGE_PATH=$(pwd)/storage \ - -e HOST_SHARED_PATH=$(pwd)/shared \ - trycua/lumier:latest -``` - ---- - -See the [Lumier README](https://github.com/trycua/cua/tree/main/libs/lumier) for advanced options, Docker Compose setup, and automation scripts. 
\ No newline at end of file +1. The Docker container provides a consistent environment to run the Lumier interface +2. Lumier connects to the Lume service running on your host Mac +3. Lume uses Apple's Virtualization Framework to create a true macOS virtual machine +4. The VM runs with hardware acceleration using your Mac's native virtualization capabilities \ No newline at end of file diff --git a/docs/content/docs/libraries/lumier/installation.mdx b/docs/content/docs/libraries/lumier/installation.mdx new file mode 100644 index 00000000..e0c20267 --- /dev/null +++ b/docs/content/docs/libraries/lumier/installation.mdx @@ -0,0 +1,14 @@ +--- +title: Installation +--- + +Before using Lumier, make sure you have: + +1. **Docker for Apple Silicon** - download it [here](https://desktop.docker.com/mac/main/arm64/Docker.dmg) and follow the installation instructions. + +2. **Lume** - This is the virtualization CLI that powers Lumier. Install it with this command: +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" +``` + +After installation, Lume runs as a background service and listens on port 7777. This service allows Lumier to create and manage virtual machines. If port 7777 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script. 
\ No newline at end of file diff --git a/docs/content/docs/libraries/lumier/meta.json b/docs/content/docs/libraries/lumier/meta.json new file mode 100644 index 00000000..f6a8946c --- /dev/null +++ b/docs/content/docs/libraries/lumier/meta.json @@ -0,0 +1,8 @@ +{ + "pages": [ + "installation", + "docker", + "docker-compose", + "building-lumier" + ] +} diff --git a/docs/content/docs/libraries/mcp-server/client-integrations.mdx b/docs/content/docs/libraries/mcp-server/client-integrations.mdx new file mode 100644 index 00000000..8699cda0 --- /dev/null +++ b/docs/content/docs/libraries/mcp-server/client-integrations.mdx @@ -0,0 +1,20 @@ +--- +title: Client Integrations +--- + +## Claude Desktop Integration + +To use with Claude Desktop, add an entry to your Claude Desktop configuration (`claude_desktop_config.json`, typically found in `~/.config/claude-desktop/`): + +For more information on MCP with Claude Desktop, see the [official MCP User Guide](https://modelcontextprotocol.io/quickstart/user). + +## Cursor Integration + +To use with Cursor, add an MCP configuration file in one of these locations: + +- **Project-specific**: Create `.cursor/mcp.json` in your project directory +- **Global**: Create `~/.cursor/mcp.json` in your home directory + +After configuration, you can simply tell Cursor's Agent to perform computer tasks by explicitly mentioning the CUA agent, such as "Use the computer control tools to open Safari." + +For more information on MCP with Cursor, see the [official Cursor MCP documentation](https://docs.cursor.com/context/model-context-protocol). 
\ No newline at end of file
diff --git a/docs/content/docs/libraries/mcp-server/configuration.mdx b/docs/content/docs/libraries/mcp-server/configuration.mdx
new file mode 100644
index 00000000..e5df8293
--- /dev/null
+++ b/docs/content/docs/libraries/mcp-server/configuration.mdx
@@ -0,0 +1,10 @@
+---
+title: Configuration
+---
+
+The server is configured using environment variables (can be set in the Claude Desktop config):
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `CUA_MODEL_NAME` | Model string (e.g., "anthropic/claude-3-5-sonnet-20241022", "openai/computer-use-preview", "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", "omniparser+litellm/gpt-4o", "omniparser+ollama_chat/gemma3") | anthropic/claude-3-5-sonnet-20241022 |
+| `CUA_MAX_IMAGES` | Maximum number of images to keep in context | 3 |
diff --git a/docs/content/docs/libraries/mcp-server/index.mdx b/docs/content/docs/libraries/mcp-server/index.mdx
index f9885bf1..87c9a342 100644
--- a/docs/content/docs/libraries/mcp-server/index.mdx
+++ b/docs/content/docs/libraries/mcp-server/index.mdx
@@ -6,14 +6,4 @@ github:
   - https://github.com/trycua/cua/tree/main/libs/python/mcp-server
 ---
 
-## ⚠️ 🚧 Under Construction 🚧 ⚠️
-
-The MCP Server API reference documentation is currently under development.
-
-## Overview
-
-The MCP Server provides Model Context Protocol endpoints for AI model integration.
-
-## API Documentation
-
-Coming soon.
+**cua-mcp-server** is an MCP server for the Computer-Use Agent (CUA), allowing you to run CUA through Claude Desktop or other MCP clients. 
\ No newline at end of file diff --git a/docs/content/docs/libraries/mcp-server/installation.mdx b/docs/content/docs/libraries/mcp-server/installation.mdx new file mode 100644 index 00000000..c04a4917 --- /dev/null +++ b/docs/content/docs/libraries/mcp-server/installation.mdx @@ -0,0 +1,53 @@ +--- +title: Installation +--- + +Install the package from PyPI: + +```bash +pip install cua-mcp-server +``` + +This will install: +- The MCP server +- CUA agent and computer dependencies +- An executable `cua-mcp-server` script in your PATH + +## Easy Setup Script + +If you want to simplify installation, you can use this one-liner to download and run the installation script: + +```bash +curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/python/mcp-server/scripts/install_mcp_server.sh | bash +``` + +This script will: +- Create the ~/.cua directory if it doesn't exist +- Generate a startup script at ~/.cua/start_mcp_server.sh +- Make the script executable +- The startup script automatically manages Python virtual environments and installs/updates the cua-mcp-server package + +You can then use the script in your MCP configuration like this: + +```json +{ + "mcpServers": { + "cua-agent": { + "command": "/bin/bash", + "args": ["~/.cua/start_mcp_server.sh"], + "env": { + "CUA_MODEL_NAME": "anthropic/claude-3-5-sonnet-20241022" + } + } + } +} +``` + +### Troubleshooting + +If you get a `/bin/bash: ~/cua/libs/python/mcp-server/scripts/start_mcp_server.sh: No such file or directory` error, try changing the path to the script to be absolute instead of relative. 
+ +To see the logs: +``` +tail -n 20 -f ~/Library/Logs/Claude/mcp*.log +``` \ No newline at end of file diff --git a/docs/content/docs/libraries/mcp-server/llm-integrations.mdx b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx new file mode 100644 index 00000000..a7515ae2 --- /dev/null +++ b/docs/content/docs/libraries/mcp-server/llm-integrations.mdx @@ -0,0 +1,16 @@ +--- +title: LLM Integrations +--- +## LiteLLM Integration + +This MCP server features comprehensive liteLLM integration, allowing you to use any supported LLM provider with a simple model string configuration. + +- **Unified Configuration**: Use a single `CUA_MODEL_NAME` environment variable with a model string +- **Automatic Provider Detection**: The agent automatically detects the provider and capabilities from the model string +- **Extensive Provider Support**: Works with Anthropic, OpenAI, local models, and any liteLLM-compatible provider + +### Model String Examples: +- **Anthropic**: `"anthropic/claude-3-5-sonnet-20241022"` +- **OpenAI**: `"openai/computer-use-preview"` +- **UI-TARS**: `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"` +- **Omni + Any LiteLLM**: `"omniparser+litellm/gpt-4o"`, `"omniparser+litellm/claude-3-haiku"`, `"omniparser+ollama_chat/gemma3"` \ No newline at end of file diff --git a/docs/content/docs/libraries/mcp-server/meta.json b/docs/content/docs/libraries/mcp-server/meta.json new file mode 100644 index 00000000..45fa4ba9 --- /dev/null +++ b/docs/content/docs/libraries/mcp-server/meta.json @@ -0,0 +1,10 @@ +{ + "pages": [ + "installation", + "configuration", + "usage", + "tools", + "client-integrations", + "llm-integrations" + ] +} \ No newline at end of file diff --git a/docs/content/docs/libraries/mcp-server/tools.mdx b/docs/content/docs/libraries/mcp-server/tools.mdx new file mode 100644 index 00000000..edf29c0b --- /dev/null +++ b/docs/content/docs/libraries/mcp-server/tools.mdx @@ -0,0 +1,10 @@ +--- +title: Tools +--- + +## Available Tools + +The MCP 
server exposes the following tools to Claude:
+
+1. `run_cua_task` - Run a single Computer-Use Agent task with the given instruction
+2. `run_multi_cua_tasks` - Run multiple tasks in sequence
\ No newline at end of file
diff --git a/docs/content/docs/libraries/mcp-server/usage.mdx b/docs/content/docs/libraries/mcp-server/usage.mdx
new file mode 100644
index 00000000..19eef934
--- /dev/null
+++ b/docs/content/docs/libraries/mcp-server/usage.mdx
@@ -0,0 +1,20 @@
+---
+title: Usage
+---
+
+## Usage
+
+Once configured, you can simply ask Claude to perform computer tasks:
+
+- "Open Chrome and go to github.com"
+- "Create a folder called 'Projects' on my desktop"
+- "Find all PDFs in my Downloads folder"
+- "Take a screenshot and highlight the error message"
+
+Claude will automatically use your CUA agent to perform these tasks.
+
+### First-time Usage Notes
+
+**API Keys**: Ensure you have valid API keys:
+ - Add your Anthropic API key, or other model provider API key in the Claude Desktop config (as shown on the Installation page)
+ - Or set it as an environment variable in your shell profile
diff --git a/docs/content/docs/libraries/som/configuration.mdx b/docs/content/docs/libraries/som/configuration.mdx
new file mode 100644
index 00000000..e57cdf1c
--- /dev/null
+++ b/docs/content/docs/libraries/som/configuration.mdx
@@ -0,0 +1,66 @@
+---
+title: Configuration
+---
+
+### Detection Parameters
+
+#### Box Threshold (0.3)
+Controls the confidence threshold for accepting detections:
+Illustration of confidence thresholds in object detection, with a high-confidence detection accepted and a low-confidence detection rejected. 
+- Higher values (0.3) yield more precise but fewer detections +- Lower values (0.01) catch more potential icons but increase false positives +- Default is 0.3 for optimal precision/recall balance + +#### IOU Threshold (0.1) +Controls how overlapping detections are merged: +Diagram showing Intersection over Union (IOU) with low overlap between two boxes kept separate and high overlap leading to merging. +- Lower values (0.1) more aggressively remove overlapping boxes +- Higher values (0.5) allow more overlapping detections +- Default is 0.1 to handle densely packed UI elements + +### OCR Configuration + +- **Engine**: EasyOCR + - Primary choice for all platforms + - Fast initialization and processing + - Built-in English language support + - GPU acceleration when available + +- **Settings**: + - Timeout: 5 seconds + - Confidence threshold: 0.5 + - Paragraph mode: Disabled + - Language: English only + +## Performance + +### Hardware Acceleration + +#### MPS (Metal Performance Shaders) +- Multi-scale detection (640px, 1280px, 1920px) +- Test-time augmentation enabled +- Half-precision (FP16) +- Average detection time: ~0.4s +- Best for production use when available + +#### CPU +- Single-scale detection (1280px) +- Full-precision (FP32) +- Average detection time: ~1.3s +- Reliable fallback option + +### Example Output Structure + +``` +examples/output/ +β”œβ”€β”€ {timestamp}_no_ocr/ +β”‚ β”œβ”€β”€ annotated_images/ +β”‚ β”‚ └── screenshot_analyzed.png +β”‚ β”œβ”€β”€ screen_details.txt +β”‚ └── summary.json +└── {timestamp}_ocr/ + β”œβ”€β”€ annotated_images/ + β”‚ └── screenshot_analyzed.png + β”œβ”€β”€ screen_details.txt + └── summary.json +``` \ No newline at end of file diff --git a/docs/content/docs/quickstart-cli.mdx b/docs/content/docs/quickstart-cli.mdx index f9bcdefe..668ec49d 100644 --- a/docs/content/docs/quickstart-cli.mdx +++ b/docs/content/docs/quickstart-cli.mdx @@ -72,7 +72,7 @@ Choose how you want to run your cua computer. 
**Cloud containers are recommended 2. Pull the CUA Ubuntu container ```bash - docker pull trycua/cua-ubuntu:latest + docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest ``` diff --git a/docs/content/docs/quickstart-devs.mdx b/docs/content/docs/quickstart-devs.mdx index 1ba86e68..37367709 100644 --- a/docs/content/docs/quickstart-devs.mdx +++ b/docs/content/docs/quickstart-devs.mdx @@ -71,7 +71,7 @@ Choose how you want to run your cua computer. **Cloud containers are recommended 2. Pull the CUA Ubuntu container ```bash - docker pull trycua/cua-ubuntu:latest + docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest ``` @@ -87,6 +87,16 @@ Choose how you want to run your cua computer. **Cloud containers are recommended ```bash pip install "cua-agent[all]" cua-computer + + # or install specific providers + pip install "cua-agent[openai]" # OpenAI computer-use-preview support + pip install "cua-agent[anthropic]" # Anthropic Claude support + pip install "cua-agent[omni]" # Omniparser + any LLM support + pip install "cua-agent[uitars]" # UI-TARS + pip install "cua-agent[uitars-mlx]" # UI-TARS + MLX support + pip install "cua-agent[uitars-hf]" # UI-TARS + Huggingface support + pip install "cua-agent[glm45v-hf]" # GLM-4.5V + Huggingface support + pip install "cua-agent[ui]" # Gradio UI support ``` diff --git a/docs/content/docs/quickstart-ui.mdx b/docs/content/docs/quickstart-ui.mdx index ad6ea396..72bac935 100644 --- a/docs/content/docs/quickstart-ui.mdx +++ b/docs/content/docs/quickstart-ui.mdx @@ -72,7 +72,7 @@ Choose how you want to run your cua computer. **Cloud containers are recommended 2. 
Pull the CUA Ubuntu container ```bash - docker pull trycua/cua-ubuntu:latest + docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest ``` diff --git a/docs/package-lock.json b/docs/package-lock.json new file mode 100644 index 00000000..e4379add --- /dev/null +++ b/docs/package-lock.json @@ -0,0 +1,7189 @@ +{ + "name": "docs", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "docs", + "version": "0.0.0", + "hasInstallScript": true, + "dependencies": { + "fumadocs-core": "15.5.1", + "fumadocs-mdx": "11.6.7", + "fumadocs-ui": "15.5.1", + "lucide-react": "^0.525.0", + "mermaid": "^11.8.1", + "next": "15.3.3", + "next-themes": "^0.4.6", + "react": "^19.1.0", + "react-dom": "^19.1.0", + "remark": "^15.0.1", + "remark-gfm": "^4.0.1", + "remark-mdx": "^3.1.0", + "tailwind-merge": "^3.3.1", + "zod": "^3.25.76" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4.1.8", + "@types/mdx": "^2.0.13", + "@types/node": "22.15.28", + "@types/react": "^19.1.6", + "@types/react-dom": "^19.1.5", + "postcss": "^8.5.4", + "prettier": "^3.6.2", + "tailwindcss": "^4.1.8", + "typescript": "^5.8.3" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@antfu/install-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-1.1.0.tgz", + "integrity": "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==", + "license": "MIT", + "dependencies": { + "package-manager-detector": "^1.3.0", + "tinyexec": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + 
}, + "node_modules/@antfu/utils": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/@antfu/utils/-/utils-8.1.1.tgz", + "integrity": "sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.1.tgz", + "integrity": "sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==", + "license": "MIT" + }, + "node_modules/@chevrotain/cst-dts-gen": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.0.3.tgz", + "integrity": "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/gast": "11.0.3", + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/gast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.0.3.tgz", + "integrity": "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/regexp-to-ast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.0.3.tgz", + "integrity": "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/types": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.0.3.tgz", + "integrity": "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==", + "license": 
"Apache-2.0" + }, + "node_modules/@chevrotain/utils": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.0.3.tgz", + "integrity": "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==", + "license": "Apache-2.0" + }, + "node_modules/@emnapi/runtime": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.5.tgz", + "integrity": "sha512-++LApOtY0pEEz1zrd9vy1/zXVaVJJ/EbAF3u0fXIzPJEDtnITsBGbbK0EkM72amhl/R5b+5xx0Y/QhcVOpuulg==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.9.tgz", + "integrity": "sha512-OaGtL73Jck6pBKjNIe24BnFE6agGl+6KxDtTfHhy1HmhthfKouEcOhqpSL64K4/0WCtbKFLOdzD/44cJ4k9opA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.9.tgz", + "integrity": "sha512-5WNI1DaMtxQ7t7B6xa572XMXpHAaI/9Hnhk8lcxF4zVN4xstUgTlvuGDorBguKEnZO70qwEcLpfifMLoxiPqHQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.9.tgz", + "integrity": "sha512-IDrddSmpSv51ftWslJMvl3Q2ZT98fUSL2/rlUXuVqRXHCs5EUF1/f+jbjF5+NG9UffUDMCiTyh8iec7u8RlTLg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.9.tgz", 
+ "integrity": "sha512-I853iMZ1hWZdNllhVZKm34f4wErd4lMyeV7BLzEExGEIZYsOzqDWDf+y082izYUE8gtJnYHdeDpN/6tUdwvfiw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.9.tgz", + "integrity": "sha512-XIpIDMAjOELi/9PB30vEbVMs3GV1v2zkkPnuyRRURbhqjyzIINwj+nbQATh4H9GxUgH1kFsEyQMxwiLFKUS6Rg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.9.tgz", + "integrity": "sha512-jhHfBzjYTA1IQu8VyrjCX4ApJDnH+ez+IYVEoJHeqJm9VhG9Dh2BYaJritkYK3vMaXrf7Ogr/0MQ8/MeIefsPQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.9.tgz", + "integrity": "sha512-z93DmbnY6fX9+KdD4Ue/H6sYs+bhFQJNCPZsi4XWJoYblUqT06MQUdBCpcSfuiN72AbqeBFu5LVQTjfXDE2A6Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.9.tgz", + "integrity": "sha512-mrKX6H/vOyo5v71YfXWJxLVxgy1kyt1MQaD8wZJgJfG4gq4DpQGpgTB74e5yBeQdyMTbgxp0YtNj7NuHN0PoZg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.9", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.9.tgz", + "integrity": "sha512-HBU2Xv78SMgaydBmdor38lg8YDnFKSARg1Q6AT0/y2ezUAKiZvc211RDFHlEZRFNRVhcMamiToo7bDx3VEOYQw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.9.tgz", + "integrity": "sha512-BlB7bIcLT3G26urh5Dmse7fiLmLXnRlopw4s8DalgZ8ef79Jj4aUcYbk90g8iCa2467HX8SAIidbL7gsqXHdRw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.9.tgz", + "integrity": "sha512-e7S3MOJPZGp2QW6AK6+Ly81rC7oOSerQ+P8L0ta4FhVi+/j/v2yZzx5CqqDaWjtPFfYz21Vi1S0auHrap3Ma3A==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.9.tgz", + "integrity": "sha512-Sbe10Bnn0oUAB2AalYztvGcK+o6YFFA/9829PhOCUS9vkJElXGdphz0A3DbMdP8gmKkqPmPcMJmJOrI3VYB1JQ==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.9.tgz", + "integrity": "sha512-YcM5br0mVyZw2jcQeLIkhWtKPeVfAerES5PvOzaDxVtIyZ2NUBZKNLjC5z3/fUlDgT6w89VsxP2qzNipOaaDyA==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.9", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.9.tgz", + "integrity": "sha512-++0HQvasdo20JytyDpFvQtNrEsAgNG2CY1CLMwGXfFTKGBGQT3bOeLSYE2l1fYdvML5KUuwn9Z8L1EWe2tzs1w==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.9.tgz", + "integrity": "sha512-uNIBa279Y3fkjV+2cUjx36xkx7eSjb8IvnL01eXUKXez/CBHNRw5ekCGMPM0BcmqBxBcdgUWuUXmVWwm4CH9kg==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.9.tgz", + "integrity": "sha512-Mfiphvp3MjC/lctb+7D287Xw1DGzqJPb/J2aHHcHxflUo+8tmN/6d4k6I2yFR7BVo5/g7x2Monq4+Yew0EHRIA==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.9.tgz", + "integrity": "sha512-iSwByxzRe48YVkmpbgoxVzn76BXjlYFXC7NvLYq+b+kDjyyk30J0JY47DIn8z1MO3K0oSl9fZoRmZPQI4Hklzg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.9.tgz", + "integrity": "sha512-9jNJl6FqaUG+COdQMjSCGW4QiMHH88xWbvZ+kRVblZsWrkXlABuGdFJ1E9L7HK+T0Yqd4akKNa/lO0+jDxQD4Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": 
"0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.9.tgz", + "integrity": "sha512-RLLdkflmqRG8KanPGOU7Rpg829ZHu8nFy5Pqdi9U01VYtG9Y0zOG6Vr2z4/S+/3zIyOxiK6cCeYNWOFR9QP87g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.9.tgz", + "integrity": "sha512-YaFBlPGeDasft5IIM+CQAhJAqS3St3nJzDEgsgFixcfZeyGPCd6eJBWzke5piZuZ7CtL656eOSYKk4Ls2C0FRQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.9.tgz", + "integrity": "sha512-1MkgTCuvMGWuqVtAvkpkXFmtL8XhWy+j4jaSO2wxfJtilVCi0ZE37b8uOdMItIHz4I6z1bWWtEX4CJwcKYLcuA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.9.tgz", + "integrity": "sha512-4Xd0xNiMVXKh6Fa7HEJQbrpP3m3DDn43jKxMjxLLRjWnRsfxjORYJlXPO4JNcXtOyfajXorRKY9NkOpTHptErg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.9.tgz", + "integrity": "sha512-WjH4s6hzo00nNezhp3wFIAfmGZ8U7KtrJNlFMRKxiI9mxEK1scOMAaa9i4crUtu+tBr+0IN6JCuAcSBJZfnphw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/win32-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.9.tgz", + "integrity": "sha512-mGFrVJHmZiRqmP8xFOc6b84/7xa5y5YvR1x8djzXpJBSv/UsNK6aqec+6JDjConTgvvQefdGhFDAs2DLAds6gQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.9.tgz", + "integrity": "sha512-b33gLVU2k11nVx1OhX3C8QQP6UHQK4ZtN56oFWvVXvz2VkDoe6fbG8TOgHFxEvqeqohmRnIHe5A1+HADk4OQww==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.9.tgz", + "integrity": "sha512-PPOl1mi6lpLNQxnGoyAfschAodRFYXJ+9fs6WHXz7CSWKbOqiMZsubC+BQsVKuul+3vKLuwTHsS2c2y9EoKwxQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.6", + "resolved": 
"https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.4" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@formatjs/intl-localematcher": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.6.1.tgz", + "integrity": "sha512-ePEgLgVCqi2BBFnTMWPfIghu6FkbZnnBVhO2sSxvLfrdFw7wCHAHiDoM2h4NRgjbaY7+B7HgOLZGkK187pZTZg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", + "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", + "license": "MIT" + }, + "node_modules/@iconify/utils": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-2.3.0.tgz", + "integrity": "sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA==", + "license": "MIT", + "dependencies": { + "@antfu/install-pkg": "^1.0.0", + "@antfu/utils": "^8.1.0", + "@iconify/types": "^2.0.0", + "debug": "^4.4.0", + "globals": "^15.14.0", + "kolorist": "^1.8.0", + "local-pkg": "^1.0.0", + "mlly": "^1.7.4" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.3.tgz", + "integrity": 
"sha512-ryFMfvxxpQRsgZJqBd4wsttYQbCxsJksrv9Lw/v798JcQ8+w84mBWuXwl+TT0WJ/WrYOLaYpwQXi3sA9nTIaIg==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.2.0" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.3.tgz", + "integrity": "sha512-yHpJYynROAj12TA6qil58hmPmAwxKKC7reUqtGLzsOHfP7/rniNGTL8tjWX6L3CTV4+5P4ypcS7Pp+7OB+8ihA==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.2.0" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.0.tgz", + "integrity": "sha512-sBZmpwmxqwlqG9ueWFXtockhsxefaV6O84BMOrhtg/YqbTaRdqDE7hxraVE3y6gVM4eExmfzW4a8el9ArLeEiQ==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.0.tgz", + "integrity": "sha512-M64XVuL94OgiNHa5/m2YvEQI5q2cl9d/wk0qFTDVXcYzi43lxuiFTftMR1tOnFQovVXNZJ5TURSDK2pNe9Yzqg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + 
"version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.0.tgz", + "integrity": "sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.0.tgz", + "integrity": "sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-ppc64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.0.tgz", + "integrity": "sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ==", + "cpu": [ + "ppc64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.0.tgz", + "integrity": "sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.0.tgz", + "integrity": "sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.0.tgz", + "integrity": "sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.0.tgz", + "integrity": "sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.3.tgz", + "integrity": "sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A==", + "cpu": [ + "arm" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.2.0" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.34.3", + 
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.3.tgz", + "integrity": "sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.2.0" + } + }, + "node_modules/@img/sharp-linux-ppc64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.3.tgz", + "integrity": "sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA==", + "cpu": [ + "ppc64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-ppc64": "1.2.0" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.3.tgz", + "integrity": "sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.2.0" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.3.tgz", + "integrity": "sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ==", + "cpu": [ + "x64" + ], + 
"license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.2.0" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.3.tgz", + "integrity": "sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.2.0" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.3.tgz", + "integrity": "sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.2.0" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.3.tgz", + "integrity": "sha512-+CyRcpagHMGteySaWos8IbnXcHgfDn7pO2fiC2slJxvNq9gDipYBN42/RagzctVRKgxATmfqOSulgZv5e1RdMg==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.4.4" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": 
"https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-arm64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.3.tgz", + "integrity": "sha512-MjnHPnbqMXNC2UgeLJtX4XqoVHHlZNd+nPt1kRPmj63wURegwBhZlApELdtxM2OIZDRv/DFtLcNhVbd1z8GYXQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.3.tgz", + "integrity": "sha512-xuCdhH44WxuXgOM714hn4amodJMZl3OEvf0GVTm0BEyMeA2to+8HEdRPShH0SLYptJY1uBw+SCFP9WVQi1Q/cw==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.3.tgz", + "integrity": "sha512-OWwz05d++TxzLEv4VnsTz5CmZ6mI6S05sfQGEMrNrQcOEERbX46332IvE7pO/EUiw7jUrrS40z/M7kPyjfl04g==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + 
"node": ">=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.30", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.30.tgz", + "integrity": "sha512-GQ7Nw5G2lTu/BtHTKfXhKHok2WGetd4XYcVKGx00SjAk8GMwgJM3zr6zORiPGuOE+/vkc90KtTosSSvaCjKb2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@mdx-js/mdx": { + 
"version": "3.1.0", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.0.tgz", + "integrity": "sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mermaid-js/parser": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.6.2.tgz", + "integrity": "sha512-+PO02uGF6L6Cs0Bw8RpGhikVvMWEysfAyl27qTlroUB8jSWr1lL0Sf6zi78ZxlSnmgSY2AMMKVgghnN9jTtwkQ==", + "license": "MIT", + "dependencies": { + "langium": "3.3.1" + } + }, + "node_modules/@next/env": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.3.3.tgz", + "integrity": "sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw==", + "license": "MIT" + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.3.3.tgz", + "integrity": "sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==", + "cpu": [ + "arm64" + ], + 
"license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.3.3.tgz", + "integrity": "sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.3.3.tgz", + "integrity": "sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.3.3.tgz", + "integrity": "sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.3.3.tgz", + "integrity": "sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.3.3.tgz", + "integrity": 
"sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.3.3.tgz", + "integrity": "sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.3.3.tgz", + "integrity": "sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@orama/orama": { + "version": "3.1.11", + "resolved": "https://registry.npmjs.org/@orama/orama/-/orama-3.1.11.tgz", + "integrity": "sha512-Szki0cgFiXE5F9RLx2lUyEtJllnuCSQ4B8RLDwIjXkVit6qZjoDAxH+xhJs29MjKLDz0tbPLdKFa6QrQ/qoGGA==", + "license": "Apache-2.0", + "engines": { + "node": ">= 20.0.0" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-accordion": { + 
"version": "1.2.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accordion/-/react-accordion-1.2.12.tgz", + "integrity": "sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collapsible": "1.1.12", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", + "integrity": "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + 
"peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + 
"@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-navigation-menu": { + "version": "1.2.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-navigation-menu/-/react-navigation-menu-1.2.14.tgz", + "integrity": "sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + 
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + 
"@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.10.tgz", + "integrity": 
"sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": 
"1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": 
"MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@shikijs/core": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.11.0.tgz", + "integrity": 
"sha512-oJwU+DxGqp6lUZpvtQgVOXNZcVsirN76tihOLBmwILkKuRuwHteApP8oTXmL4tF5vS5FbOY0+8seXmiCoslk4g==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5" + } + }, + "node_modules/@shikijs/engine-javascript": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.11.0.tgz", + "integrity": "sha512-6/ov6pxrSvew13k9ztIOnSBOytXeKs5kfIR7vbhdtVRg+KPzvp2HctYGeWkqv7V6YIoLicnig/QF3iajqyElZA==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.3" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.11.0.tgz", + "integrity": "sha512-4DwIjIgETK04VneKbfOE4WNm4Q7WC1wo95wv82PoHKdqX4/9qLRUwrfKlmhf0gAuvT6GHy0uc7t9cailk6Tbhw==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.11.0.tgz", + "integrity": "sha512-Njg/nFL4HDcf/ObxcK2VeyidIq61EeLmocrwTHGGpOQx0BzrPWM1j55XtKQ1LvvDWH15cjQy7rg96aJ1/l63uw==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0" + } + }, + "node_modules/@shikijs/rehype": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/rehype/-/rehype-3.11.0.tgz", + "integrity": "sha512-4njEqPBoFn+fsQqSOEw8OjiIIg52jBM0I2qRC1NALFFmQm97qZuQvP570RI94HvAfzCT6agG6ZFBPofrNZ4tlQ==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0", + "@types/hast": "^3.0.4", + "hast-util-to-string": "^3.0.1", + "shiki": "3.11.0", + "unified": "^11.0.5", + "unist-util-visit": "^5.0.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.11.0", + "resolved": 
"https://registry.npmjs.org/@shikijs/themes/-/themes-3.11.0.tgz", + "integrity": "sha512-BhhWRzCTEk2CtWt4S4bgsOqPJRkapvxdsifAwqP+6mk5uxboAQchc0etiJ0iIasxnMsb764qGD24DK9albcU9Q==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.11.0" + } + }, + "node_modules/@shikijs/transformers": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/transformers/-/transformers-3.11.0.tgz", + "integrity": "sha512-fhSpVoq0FoCtKbBpzE3mXcIbr0b7ozFDSSWiVjWrQy+wrOfaFfwxgJqh8kY3Pbv/i+4pcuMIVismLD2MfO62eQ==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "3.11.0", + "@shikijs/types": "3.11.0" + } + }, + "node_modules/@shikijs/types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.11.0.tgz", + "integrity": "sha512-RB7IMo2E7NZHyfkqAuaf4CofyY8bPzjWPjJRzn6SEak3b46fIQyG6Vx5fG/obqkfppQ+g8vEsiD7Uc6lqQt32Q==", + "license": "MIT", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + "license": "MIT" + }, + "node_modules/@standard-schema/spec": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", + "license": "MIT" + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.15", + "resolved": 
"https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.12.tgz", + "integrity": "sha512-3hm9brwvQkZFe++SBt+oLjo4OLDtkvlE8q2WalaD/7QWaeM7KEJbAiY/LJZUaCs7Xa8aUu4xy3uoyX4q54UVdQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.5.1", + "lightningcss": "1.30.1", + "magic-string": "^0.30.17", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.12" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.12.tgz", + "integrity": "sha512-gM5EoKHW/ukmlEtphNwaGx45fGoEmP10v51t9unv55voWh6WrOL19hfuIdo2FjxIaZzw776/BUQg7Pck++cIVw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.4", + "tar": "^7.4.3" + }, + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.12", + "@tailwindcss/oxide-darwin-arm64": "4.1.12", + "@tailwindcss/oxide-darwin-x64": "4.1.12", + "@tailwindcss/oxide-freebsd-x64": "4.1.12", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.12", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.12", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.12", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.12", + "@tailwindcss/oxide-linux-x64-musl": "4.1.12", + "@tailwindcss/oxide-wasm32-wasi": "4.1.12", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.12", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.12" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.12", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.12.tgz", + "integrity": "sha512-oNY5pq+1gc4T6QVTsZKwZaGpBb2N1H1fsc1GD4o7yinFySqIuRZ2E4NvGasWc6PhYJwGK2+5YT1f9Tp80zUQZQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.12.tgz", + "integrity": "sha512-cq1qmq2HEtDV9HvZlTtrj671mCdGB93bVY6J29mwCyaMYCP/JaUBXxrQQQm7Qn33AXXASPUb2HFZlWiiHWFytw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.12.tgz", + "integrity": "sha512-6UCsIeFUcBfpangqlXay9Ffty9XhFH1QuUFn0WV83W8lGdX8cD5/+2ONLluALJD5+yJ7k8mVtwy3zMZmzEfbLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.12.tgz", + "integrity": "sha512-JOH/f7j6+nYXIrHobRYCtoArJdMJh5zy5lr0FV0Qu47MID/vqJAY3r/OElPzx1C/wdT1uS7cPq+xdYYelny1ww==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.12.tgz", + "integrity": 
"sha512-v4Ghvi9AU1SYgGr3/j38PD8PEe6bRfTnNSUE3YCMIRrrNigCFtHZ2TCm8142X8fcSqHBZBceDx+JlFJEfNg5zQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.12.tgz", + "integrity": "sha512-YP5s1LmetL9UsvVAKusHSyPlzSRqYyRB0f+Kl/xcYQSPLEw/BvGfxzbH+ihUciePDjiXwHh+p+qbSP3SlJw+6g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.12.tgz", + "integrity": "sha512-V8pAM3s8gsrXcCv6kCHSuwyb/gPsd863iT+v1PGXC4fSL/OJqsKhfK//v8P+w9ThKIoqNbEnsZqNy+WDnwQqCA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.12.tgz", + "integrity": "sha512-xYfqYLjvm2UQ3TZggTGrwxjYaLB62b1Wiysw/YE3Yqbh86sOMoTn0feF98PonP7LtjsWOWcXEbGqDL7zv0uW8Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.12.tgz", + "integrity": "sha512-ha0pHPamN+fWZY7GCzz5rKunlv9L5R8kdh+YNvP5awe3LtuXb5nRi/H27GeL2U+TdhDOptU7T6Is7mdwh5Ar3A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.12.tgz", + "integrity": "sha512-4tSyu3dW+ktzdEpuk6g49KdEangu3eCYoqPhWNsZgUhyegEda3M9rG0/j1GV/JjVVsj+lG7jWAyrTlLzd/WEBg==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.5", + "@emnapi/runtime": "^1.4.5", + "@emnapi/wasi-threads": "^1.0.4", + "@napi-rs/wasm-runtime": "^0.2.12", + "@tybys/wasm-util": "^0.10.0", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz", + "integrity": "sha512-iGLyD/cVP724+FGtMWslhcFyg4xyYyM+5F4hGvKA7eifPkXHRAUDFaimu53fpNg9X8dfP75pXx/zFt/jlNF+lg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.12.tgz", + "integrity": "sha512-NKIh5rzw6CpEodv/++r0hGLlfgT/gFN+5WNdZtvh6wpU2BpGNgdjvj6H2oFc8nCM839QM1YOhjpgbAONUb4IxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/postcss": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.12.tgz", + "integrity": 
"sha512-5PpLYhCAwf9SJEeIsSmCDLgyVfdBhdBpzX1OJ87anT9IVR0Z9pjM0FNixCAUAHGnMBGB8K99SwAheXrT0Kh6QQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "@tailwindcss/node": "4.1.12", + "@tailwindcss/oxide": "4.1.12", + "postcss": "^8.4.41", + "tailwindcss": "4.1.12" + } + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", + "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==", + "license": "MIT" + }, + "node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "license": "MIT", + "dependencies": { + 
"@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", + "license": "MIT" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz", + "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + 
"integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "license": "MIT", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "license": "MIT" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", + "license": "MIT" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": 
"3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", + "license": "MIT" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", + "license": "MIT" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + 
"@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz", + "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": 
"sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/geojson": { + "version": "7946.0.16", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", + "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + 
"dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.15.28", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.28.tgz", + "integrity": "sha512-I0okKVDmyKR281I0UIFV7EWAWRnR0gkuSKob5wVcByyyhr7Px/slhkQapcYX4u00ekzNWaS1gznKZnuzxwo4pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/react": { + "version": "19.1.11", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.11.tgz", + "integrity": "sha512-lr3jdBw/BGj49Eps7EvqlUaoeA0xpj3pc0RoJkHpYaCHkVK7i28dKyImLQb3JVlqs3aYSXf7qYuWOW/fgZnTXQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.1.8", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.8.tgz", + "integrity": "sha512-xG7xaBMJCpcK0RpN8jDbAACQo54ycO6h4dSSmgv8+fu6ZIAdANkx/WsawASUjVXYfy+J9AbUpRMNNEsXCDfDBQ==", + "devOptional": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.0.0" + } + }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + 
"resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT", + "optional": true + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": 
"sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "license": "MIT", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001737", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001737.tgz", + "integrity": "sha512-BiloLiXtQNrY5UyF0+1nSJLXUENuhka2pzy2Fx5pGxqavdrxSCW4U6Pn/PoG3Efspi2frRbHpBV2XsrPE6EDlw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + 
"funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chevrotain": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.0.3.tgz", + "integrity": "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/cst-dts-gen": 
"11.0.3", + "@chevrotain/gast": "11.0.3", + "@chevrotain/regexp-to-ast": "11.0.3", + "@chevrotain/types": "11.0.3", + "@chevrotain/utils": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/chevrotain-allstar": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz", + "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==", + "license": "MIT", + "dependencies": { + "lodash-es": "^4.17.21" + }, + "peerDependencies": { + "chevrotain": "^11.0.0" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + 
}, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT", + "optional": true + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", 
+ "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/compute-scroll-into-view": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz", + "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==", + "license": "MIT" + }, + "node_modules/confbox": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.2.2.tgz", + "integrity": "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==", + "license": "MIT" + }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", + "dependencies": { + "layout-base": "^1.0.0" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + 
}, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/cytoscape": { + "version": "3.33.1", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz", + "integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^1.0.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", + "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^2.2.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/cose-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", + "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "license": "MIT", + "dependencies": { + "layout-base": "^2.0.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/layout-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", + "integrity": 
"sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", + "license": "MIT" + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + "d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + 
"d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", + "dependencies": { + "d3-path": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", + "dependencies": { + "d3-array": "^3.2.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", + "dependencies": { + "delaunator": "5" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": 
"sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + "json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", + "dependencies": { + "d3-dsv": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + 
"version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", + "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-random": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-array": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", + "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", + "license": "BSD-3-Clause", + "dependencies": { + "internmap": "^1.0.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", + "license": "BSD-3-Clause" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-path": "1" + } + }, + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", + "license": "ISC" + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + 
"dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre-d3-es": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.11.tgz", + "integrity": "sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw==", + "license": "MIT", + 
"dependencies": { + "d3": "^7.9.0", + "lodash-es": "^4.17.21" + } + }, + "node_modules/dayjs": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", + "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", + "dependencies": { + "robust-predicates": "^3.0.2" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-libc": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + "integrity": 
"sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", + "devOptional": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dompurify": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz", + "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==", + "license": "(MPL-2.0 OR Apache-2.0)", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.3", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.3.tgz", + "integrity": "sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/esast-util-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", + "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": 
"^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esast-util-from-js": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", + "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esbuild": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.9.tgz", + "integrity": "sha512-CRbODhYyQx3qp7ZEwzxOk4JBqmD/seJrzPa/cGjY1VtIn5E09Oi9/dB4JwctnfZ8Q8iT7rioVv5k/FNT/uf54g==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.9", + "@esbuild/android-arm": "0.25.9", + "@esbuild/android-arm64": "0.25.9", + "@esbuild/android-x64": "0.25.9", + "@esbuild/darwin-arm64": "0.25.9", + "@esbuild/darwin-x64": "0.25.9", + "@esbuild/freebsd-arm64": "0.25.9", + "@esbuild/freebsd-x64": "0.25.9", + "@esbuild/linux-arm": "0.25.9", + "@esbuild/linux-arm64": "0.25.9", + "@esbuild/linux-ia32": "0.25.9", + "@esbuild/linux-loong64": "0.25.9", + "@esbuild/linux-mips64el": "0.25.9", + "@esbuild/linux-ppc64": "0.25.9", + "@esbuild/linux-riscv64": "0.25.9", + "@esbuild/linux-s390x": "0.25.9", + "@esbuild/linux-x64": "0.25.9", + "@esbuild/netbsd-arm64": "0.25.9", + "@esbuild/netbsd-x64": "0.25.9", + "@esbuild/openbsd-arm64": "0.25.9", + "@esbuild/openbsd-x64": "0.25.9", + "@esbuild/openharmony-arm64": "0.25.9", + "@esbuild/sunos-x64": "0.25.9", + "@esbuild/win32-arm64": "0.25.9", + 
"@esbuild/win32-ia32": "0.25.9", + "@esbuild/win32-x64": "0.25.9" + } + }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-scope": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", + "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz", + "integrity": "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + 
"license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/exsolve": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.7.tgz", + "integrity": "sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==", + "license": "MIT" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "license": "MIT", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fumadocs-core": { + "version": "15.5.1", + "resolved": "https://registry.npmjs.org/fumadocs-core/-/fumadocs-core-15.5.1.tgz", + 
"integrity": "sha512-5eJPJw+BFWFdgrtWPQ9aAZAhhsyuZAwth8OjBd9R77sXoIoae4Y4lJZMq3BeSpJZcuIAOVbSCS+pJhsBAoXJ8g==", + "license": "MIT", + "dependencies": { + "@formatjs/intl-localematcher": "^0.6.1", + "@orama/orama": "^3.1.6", + "@shikijs/rehype": "^3.4.2", + "@shikijs/transformers": "^3.4.2", + "github-slugger": "^2.0.0", + "hast-util-to-estree": "^3.1.3", + "hast-util-to-jsx-runtime": "^2.3.6", + "image-size": "^2.0.2", + "negotiator": "^1.0.0", + "react-remove-scroll": "^2.6.3", + "remark": "^15.0.0", + "remark-gfm": "^4.0.1", + "remark-rehype": "^11.1.2", + "scroll-into-view-if-needed": "^3.1.0", + "shiki": "^3.4.2", + "unist-util-visit": "^5.0.0" + }, + "peerDependencies": { + "@oramacloud/client": "1.x.x || 2.x.x", + "algoliasearch": "5.x.x", + "next": "14.x.x || 15.x.x", + "react": "18.x.x || 19.x.x", + "react-dom": "18.x.x || 19.x.x" + }, + "peerDependenciesMeta": { + "@oramacloud/client": { + "optional": true + }, + "algoliasearch": { + "optional": true + }, + "next": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/fumadocs-mdx": { + "version": "11.6.7", + "resolved": "https://registry.npmjs.org/fumadocs-mdx/-/fumadocs-mdx-11.6.7.tgz", + "integrity": "sha512-jOZzxowvhwe9RzV6jVjIS2FsQIz9P6QYkMBPgR0nq9+7trP+mmiLoIq5EwhTPrR/Y/4gTiSl9TXFWxTY02trnw==", + "license": "MIT", + "dependencies": { + "@mdx-js/mdx": "^3.1.0", + "@standard-schema/spec": "^1.0.0", + "chokidar": "^4.0.3", + "esbuild": "^0.25.5", + "estree-util-value-to-estree": "^3.4.0", + "gray-matter": "^4.0.3", + "js-yaml": "^4.1.0", + "lru-cache": "^11.1.0", + "picocolors": "^1.1.1", + "tinyexec": "^1.0.1", + "tinyglobby": "^0.2.14", + "unist-util-visit": "^5.0.0", + "zod": "^3.25.42" + }, + "bin": { + "fumadocs-mdx": "bin.js" + }, + "peerDependencies": { + "@fumadocs/mdx-remote": "^1.2.0", + "fumadocs-core": "^14.0.0 || ^15.0.0", + "next": "^15.3.0" + }, + "peerDependenciesMeta": { + "@fumadocs/mdx-remote": { + "optional": 
true + } + } + }, + "node_modules/fumadocs-ui": { + "version": "15.5.1", + "resolved": "https://registry.npmjs.org/fumadocs-ui/-/fumadocs-ui-15.5.1.tgz", + "integrity": "sha512-HyMoM+mv5WZrXDAv88SLLqFrduDSxQHFU+uQkSpJQdycaGNSIB8063PW/wb/QIliusWP8o+c/YLFy/29KymEWA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-accordion": "^1.2.11", + "@radix-ui/react-collapsible": "^1.1.11", + "@radix-ui/react-dialog": "^1.1.14", + "@radix-ui/react-direction": "^1.1.1", + "@radix-ui/react-navigation-menu": "^1.2.13", + "@radix-ui/react-popover": "^1.1.14", + "@radix-ui/react-presence": "^1.1.4", + "@radix-ui/react-scroll-area": "^1.2.9", + "@radix-ui/react-slot": "^1.2.3", + "@radix-ui/react-tabs": "^1.1.12", + "class-variance-authority": "^0.7.1", + "fumadocs-core": "15.5.1", + "lodash.merge": "^4.6.2", + "next-themes": "^0.4.6", + "postcss-selector-parser": "^7.1.0", + "react-medium-image-zoom": "^5.2.14", + "react-remove-scroll": "^2.6.3", + "tailwind-merge": "^3.3.0" + }, + "peerDependencies": { + "next": "14.x.x || 15.x.x", + "react": "18.x.x || 19.x.x", + "react-dom": "18.x.x || 19.x.x", + "tailwindcss": "^3.4.14 || ^4.0.0" + }, + "peerDependenciesMeta": { + "tailwindcss": { + "optional": true + } + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/github-slugger": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==", + "license": "ISC" + }, + "node_modules/globals": { + "version": "15.15.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.15.0.tgz", + "integrity": 
"sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "license": "MIT", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/hachure-fill": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz", + "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==", + "license": "MIT" + }, + 
"node_modules/hast-util-to-estree": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz", + "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", 
+ "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-string": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz", + "integrity": "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": 
"https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/image-size": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-2.0.2.tgz", + "integrity": "sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==", + "license": "MIT", + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==", + "license": "MIT" + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + 
}, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "license": "MIT", + "optional": true + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jiti": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.5.1.tgz", + "integrity": 
"sha512-twQoecYPiVA5K/h6SxtORw/Bs3ar+mLUtoPSc7iMXzQzK8d7eJ/R09wmTwAjiamETn1cXYPGfNnu7DMoHgu12w==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/katex": { + "version": "0.16.22", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz", + "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": 
"sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==", + "license": "MIT" + }, + "node_modules/langium": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/langium/-/langium-3.3.1.tgz", + "integrity": "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==", + "license": "MIT", + "dependencies": { + "chevrotain": "~11.0.3", + "chevrotain-allstar": "~0.3.0", + "vscode-languageserver": "~9.0.1", + "vscode-languageserver-textdocument": "~1.0.11", + "vscode-uri": "~3.0.8" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", + "license": "MIT" + }, + "node_modules/lightningcss": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.1.tgz", + "integrity": "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.30.1", + "lightningcss-darwin-x64": "1.30.1", + "lightningcss-freebsd-x64": "1.30.1", + "lightningcss-linux-arm-gnueabihf": "1.30.1", + "lightningcss-linux-arm64-gnu": "1.30.1", + "lightningcss-linux-arm64-musl": "1.30.1", + "lightningcss-linux-x64-gnu": "1.30.1", + "lightningcss-linux-x64-musl": "1.30.1", + "lightningcss-win32-arm64-msvc": "1.30.1", + "lightningcss-win32-x64-msvc": "1.30.1" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.30.1", + "resolved": 
"https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz", + "integrity": "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.1.tgz", + "integrity": "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.1.tgz", + "integrity": "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.1.tgz", + "integrity": "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.1.tgz", + "integrity": "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.1.tgz", + "integrity": "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.1.tgz", + "integrity": "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.1.tgz", + "integrity": 
"sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.1.tgz", + "integrity": "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.1.tgz", + "integrity": "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/local-pkg": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-1.1.2.tgz", + "integrity": "sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==", + "license": "MIT", + "dependencies": { + "mlly": "^1.7.4", + "pkg-types": "^2.3.0", + "quansync": "^0.2.11" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": 
"https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.1.0.tgz", + "integrity": "sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==", + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/lucide-react": { + "version": "0.525.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.525.0.tgz", + "integrity": "sha512-Tm1txJ2OkymCGkvwoHt33Y2JpN5xucVq1slHcgE6Lk0WjDfjgKWor5CdVER8U6DvcfMwh4M8XxmpTiyzfmfDYQ==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.18", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.18.tgz", + "integrity": "sha512-yi8swmWbO17qHhwIBNeeZxTceJMeBvWJaId6dyvTSOwTipqeHhMhOrz6513r1sOKnpvQ7zkhlG8tPrpilwTxHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/marked": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-16.2.0.tgz", + "integrity": "sha512-LbbTuye+0dWRz2TS9KJ7wsnD4KAtpj0MVkWc90XvBa6AslXsT0hTBVH5k32pcSyHH1fst9XEFJunXHktVy0zlg==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + 
"decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": 
"sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + 
"mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": 
"^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mermaid": { + "version": "11.10.1", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.10.1.tgz", + "integrity": "sha512-0PdeADVWURz7VMAX0+MiMcgfxFKY4aweSGsjgFihe3XlMKNqmai/cugMrqTd3WNHM93V+K+AZL6Wu6tB5HmxRw==", + "license": "MIT", + "dependencies": { + "@braintree/sanitize-url": "^7.0.4", + "@iconify/utils": "^2.1.33", + "@mermaid-js/parser": "^0.6.2", + "@types/d3": "^7.4.3", + "cytoscape": "^3.29.3", + "cytoscape-cose-bilkent": "^4.1.0", + "cytoscape-fcose": "^2.2.0", + "d3": "^7.9.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.11", + "dayjs": "^1.11.13", + "dompurify": "^3.2.5", + "katex": "^0.16.22", + "khroma": "^2.1.0", + "lodash-es": "^4.17.21", + "marked": "^16.0.0", + "roughjs": "^4.6.6", + "stylis": "^4.3.6", + "ts-dedent": "^2.2.0", + "uuid": "^11.1.0" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + 
"micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": 
"sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz", + "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz", + "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "license": "MIT", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz", + "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": 
"MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + 
"micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + 
"node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz", + "integrity": "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + 
"node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": 
"GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/minipass": { + 
"version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mlly": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz", + "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==", + "license": "MIT", + "dependencies": { + "acorn": "^8.15.0", + "pathe": "^2.0.3", + "pkg-types": "^1.3.1", + "ufo": "^1.6.1" + } + }, + "node_modules/mlly/node_modules/confbox": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", + "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", + "license": "MIT" + }, + "node_modules/mlly/node_modules/pkg-types": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz", + "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==", + "license": 
"MIT", + "dependencies": { + "confbox": "^0.1.8", + "mlly": "^1.7.4", + "pathe": "^2.0.1" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/next": { + "version": "15.3.3", + "resolved": "https://registry.npmjs.org/next/-/next-15.3.3.tgz", + "integrity": "sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==", + "license": "MIT", + "dependencies": { + "@next/env": "15.3.3", + "@swc/counter": "0.1.3", + "@swc/helpers": "0.5.15", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "postcss": "8.4.31", + "styled-jsx": "5.1.6" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "15.3.3", + "@next/swc-darwin-x64": "15.3.3", + "@next/swc-linux-arm64-gnu": "15.3.3", + "@next/swc-linux-arm64-musl": "15.3.3", + "@next/swc-linux-x64-gnu": "15.3.3", + "@next/swc-linux-x64-musl": "15.3.3", + "@next/swc-win32-arm64-msvc": "15.3.3", + 
"@next/swc-win32-x64-msvc": "15.3.3", + "sharp": "^0.34.1" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next-themes": { + "version": "0.4.6", + "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz", + "integrity": "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/oniguruma-parser": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz", + "integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==", + "license": "MIT" + }, + 
"node_modules/oniguruma-to-es": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.3.tgz", + "integrity": "sha512-rPiZhzC3wXwE59YQMRDodUwwT9FZ9nNBwQQfsd1wfdtlKEyCdRV0avrTcSZ5xlIvGRVPd/cx6ZN45ECmS39xvg==", + "license": "MIT", + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.0.1", + "regex-recursion": "^6.0.2" + } + }, + "node_modules/package-manager-detector": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.3.0.tgz", + "integrity": "sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ==", + "license": "MIT" + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/path-data-parser": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz", + "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==", + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-2.3.0.tgz", + "integrity": "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==", + "license": "MIT", + "dependencies": { + "confbox": "^0.2.2", + "exsolve": "^1.0.7", + "pathe": "^2.0.3" + } + }, + "node_modules/points-on-curve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz", + "integrity": "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==", + "license": "MIT" + }, + "node_modules/points-on-path": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz", + "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==", + "license": "MIT", + "dependencies": { + "path-data-parser": "0.1.0", + "points-on-curve": "0.2.0" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": 
"sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/quansync": { + "version": "0.2.11", + "resolved": "https://registry.npmjs.org/quansync/-/quansync-0.2.11.tgz", + "integrity": 
"sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/antfu" + }, + { + "type": "individual", + "url": "https://github.com/sponsors/sxzz" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", + "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.26.0" + }, + "peerDependencies": { + "react": "^19.1.1" + } + }, + "node_modules/react-medium-image-zoom": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/react-medium-image-zoom/-/react-medium-image-zoom-5.3.0.tgz", + "integrity": "sha512-RCIzVlsKqy3BYgGgYbolUfuvx0aSKC7YhX/IJGEp+WJxsqdIVYJHkBdj++FAj6VD7RiWj6VVmdCfa/9vJE9hZg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/rpearce" + } + ], + "license": "BSD-3-Clause", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.1.tgz", + "integrity": "sha512-HpMh8+oahmIdOuS5aFKKY6Pyog+FNaZV/XyJOq7b4YFwsFHe5yYfdbIalI4k3vU2nSDql7YskmUseHsRrJqIPA==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + 
"engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.2", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/recma-build-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", + 
"integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-jsx": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.1.tgz", + "integrity": "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==", + "license": "MIT", + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": "^1.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/recma-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", + "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-stringify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", + "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/regex": { + "version": "6.0.1", + 
"resolved": "https://registry.npmjs.org/regex/-/regex-6.0.1.tgz", + "integrity": "sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-recursion": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz", + "integrity": "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-utilities": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + "license": "MIT" + }, + "node_modules/rehype-recma": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", + "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark": { + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/remark/-/remark-15.0.1.tgz", + "integrity": "sha512-Eht5w30ruCXgFmxVUSlNWQ9iiimq07URKeFS3hNc8cUWy1llX4KDWfyEDZRycMc+znsN9Ux5/tJ/BFdgdOwA3A==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + 
"integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.0.tgz", + "integrity": "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA==", + "license": "MIT", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + 
"version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", + "license": "Unlicense" + }, + "node_modules/roughjs": { + "version": "4.6.6", + "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz", + "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==", + "license": "MIT", + "dependencies": { + "hachure-fill": "^0.5.2", + "path-data-parser": "^0.1.0", + "points-on-curve": "^0.2.0", + "points-on-path": "^0.2.1" + } + }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", + "license": "BSD-3-Clause" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "license": "MIT" + }, + 
"node_modules/scroll-into-view-if-needed": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz", + "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==", + "license": "MIT", + "dependencies": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/sharp": { + "version": "0.34.3", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.3.tgz", + "integrity": "sha512-eX2IQ6nFohW4DbvHIOLRB3MHFpYqaqvXd3Tp5e/T/dSH83fxaNJQRvDMhASmkNTsNTVF2/OOopzRCt7xokgPfg==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.4", + "semver": "^7.7.2" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.34.3", + "@img/sharp-darwin-x64": "0.34.3", + "@img/sharp-libvips-darwin-arm64": "1.2.0", + "@img/sharp-libvips-darwin-x64": "1.2.0", + "@img/sharp-libvips-linux-arm": "1.2.0", + "@img/sharp-libvips-linux-arm64": "1.2.0", + "@img/sharp-libvips-linux-ppc64": "1.2.0", + 
"@img/sharp-libvips-linux-s390x": "1.2.0", + "@img/sharp-libvips-linux-x64": "1.2.0", + "@img/sharp-libvips-linuxmusl-arm64": "1.2.0", + "@img/sharp-libvips-linuxmusl-x64": "1.2.0", + "@img/sharp-linux-arm": "0.34.3", + "@img/sharp-linux-arm64": "0.34.3", + "@img/sharp-linux-ppc64": "0.34.3", + "@img/sharp-linux-s390x": "0.34.3", + "@img/sharp-linux-x64": "0.34.3", + "@img/sharp-linuxmusl-arm64": "0.34.3", + "@img/sharp-linuxmusl-x64": "0.34.3", + "@img/sharp-wasm32": "0.34.3", + "@img/sharp-win32-arm64": "0.34.3", + "@img/sharp-win32-ia32": "0.34.3", + "@img/sharp-win32-x64": "0.34.3" + } + }, + "node_modules/shiki": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.11.0.tgz", + "integrity": "sha512-VgKumh/ib38I1i3QkMn6mAQA6XjjQubqaAYhfge71glAll0/4xnt8L2oSuC45Qcr/G5Kbskj4RliMQddGmy/Og==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "3.11.0", + "@shikijs/engine-javascript": "3.11.0", + "@shikijs/engine-oniguruma": "3.11.0", + "@shikijs/langs": "3.11.0", + "@shikijs/themes": "3.11.0", + "@shikijs/types": "3.11.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", + "optional": true, + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "license": "BSD-3-Clause" + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/style-to-js": { + "version": 
"1.1.17", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.17.tgz", + "integrity": "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.9" + } + }, + "node_modules/style-to-object": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz", + "integrity": "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/tailwind-merge": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.3.1.tgz", + "integrity": "sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.12.tgz", + "integrity": 
"sha512-DzFtxOi+7NsFf7DBtI3BJsynR+0Yp6etH+nRPTbpWnS2pZBaSksv/JGctNwSWzbFjp0vxSqknaUylseZqMDGrA==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.3.tgz", + "integrity": "sha512-ZL6DDuAlRlLGghwcfmSn9sK3Hr6ArtyudlSAiCqQ6IfE+b+HHbydbYDIG15IfS5do+7XQQBdBiubF/cV2dnDzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyexec": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.1.tgz", + "integrity": "sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==", + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.14", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz", + "integrity": "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==", + "license": "MIT", + "dependencies": { + "fdir": "^6.4.4", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": 
"MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/typescript": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.1.tgz", + "integrity": "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==", + "license": "MIT" + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unified": { + "version": "11.0.5", + 
"resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.3", + 
"resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + 
}, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vscode-jsonrpc": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", + "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/vscode-languageserver": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", + "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", + "license": "MIT", + "dependencies": { + "vscode-languageserver-protocol": "3.17.5" + }, + "bin": { + "installServerIntoExtension": "bin/installServerIntoExtension" + } + }, + "node_modules/vscode-languageserver-protocol": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", + "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", + "license": "MIT", + "dependencies": { + "vscode-jsonrpc": "8.2.0", + "vscode-languageserver-types": "3.17.5" + } + }, + "node_modules/vscode-languageserver-textdocument": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz", + "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==", + "license": "MIT" + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==", + "license": "MIT" + }, + "node_modules/vscode-uri": { 
+ "version": "3.0.8", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz", + "integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==", + "license": "MIT" + }, + "node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs/public/img/som_box_threshold.png b/docs/public/img/som_box_threshold.png new file mode 100644 index 00000000..80c89b31 Binary files /dev/null and b/docs/public/img/som_box_threshold.png differ diff --git a/docs/public/img/som_iou_threshold.png b/docs/public/img/som_iou_threshold.png new file mode 100644 index 00000000..0fa3c9c1 Binary files /dev/null and b/docs/public/img/som_iou_threshold.png differ diff --git a/docs/src/components/iou.tsx b/docs/src/components/iou.tsx index 275f2c4d..ad342304 100644 --- a/docs/src/components/iou.tsx +++ b/docs/src/components/iou.tsx @@ -1,26 +1,53 @@ 'use client'; import React, { useRef, useEffect, useState, useCallback } from 'react'; +/** + * Represents a rectangle with position, dimensions, styling, 
and identification + */ interface Rectangle { + /** The x-coordinate of the rectangle's left edge */ left: number; + /** The y-coordinate of the rectangle's top edge */ top: number; + /** The width of the rectangle */ width: number; + /** The height of the rectangle */ height: number; + /** The fill color of the rectangle */ fill: string; + /** The display name of the rectangle */ name: string; } +/** + * Props for the IOU component + */ interface IOUProps { + /** The title to display above the visualization */ title: string; + /** The description text to display below the IOU value */ description: string; + /** The first rectangle for IOU calculation */ rect1: Rectangle; + /** The second rectangle for IOU calculation */ rect2: Rectangle; } +/** + * A React component that visualizes and calculates the Intersection over Union (IOU) + * of two rectangles on a canvas + * @param props - The component props + * @returns The rendered IOU visualization component + */ export default function IOU({ title, description, rect1, rect2 }: IOUProps) { const canvasRef = useRef(null); const [actualIOU, setActualIOU] = useState(0); + /** + * Converts a rectangle to a bounding box with left, right, top, and bottom coordinates + * @param rect - The rectangle to convert + * @returns An object containing the bounding box coordinates + */ const getBbox = (rect: Rectangle) => ({ left: rect.left, right: rect.left + rect.width, @@ -28,6 +55,12 @@ export default function IOU({ title, description, rect1, rect2 }: IOUProps) { bottom: rect.top + rect.height, }); + /** + * Calculates the intersection area between two bounding boxes + * @param bbox1 - The first bounding box + * @param bbox2 - The second bounding box + * @returns The area of intersection between the two bounding boxes + */ const calcIntersection = (bbox1: any, bbox2: any): number => { const x1 = Math.max(bbox1.left, bbox2.left); const x2 = Math.min(bbox1.right, bbox2.right); @@ -43,10 +76,18 @@ export default function IOU({ title, 
description, rect1, rect2 }: IOUProps) { return intersection; }; + /** + * Calculates the area of a rectangle + * @param rect - The rectangle to calculate area for + * @returns The area of the rectangle + */ const calcArea = (rect: Rectangle): number => { return rect.width * rect.height; }; + /** + * Draws the rectangles on the canvas and calculates the IOU value + */ const drawCanvas = useCallback(() => { const canvas = canvasRef.current; if (!canvas) return; diff --git a/examples/computer-example-ts/.env.example b/examples/computer-example-ts/.env.example index 0496a574..260259a3 100644 --- a/examples/computer-example-ts/.env.example +++ b/examples/computer-example-ts/.env.example @@ -1,3 +1,3 @@ -OPENAI_KEY= -CUA_KEY= +OPENAI_API_KEY= +CUA_API_KEY= CUA_CONTAINER_NAME= \ No newline at end of file diff --git a/examples/computer-example-ts/README.md b/examples/computer-example-ts/README.md index 935588aa..500362c9 100644 --- a/examples/computer-example-ts/README.md +++ b/examples/computer-example-ts/README.md @@ -19,8 +19,8 @@ This example demonstrates how to control a cua Cloud container using the OpenAI 2. **Set up environment variables:** Create a `.env` file with the following variables: - - `OPENAI_KEY` β€” your OpenAI API key - - `CUA_KEY` β€” your cua Cloud API key + - `OPENAI_API_KEY` β€” your OpenAI API key + - `CUA_API_KEY` β€” your cua Cloud API key - `CUA_CONTAINER_NAME` β€” the name of your provisioned container 3. 
**Run the example:** diff --git a/examples/computer-example-ts/package-lock.json b/examples/computer-example-ts/package-lock.json new file mode 100644 index 00000000..ecee9d6f --- /dev/null +++ b/examples/computer-example-ts/package-lock.json @@ -0,0 +1,841 @@ +{ + "name": "computer-example-ts", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "computer-example-ts", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "@trycua/computer": "^0.1.3", + "dotenv": "^16.5.0", + "openai": "^5.7.0" + }, + "devDependencies": { + "@types/node": "^22.15.33", + "tsx": "^4.20.3", + "typescript": "^5.8.3" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.9.tgz", + "integrity": "sha512-OaGtL73Jck6pBKjNIe24BnFE6agGl+6KxDtTfHhy1HmhthfKouEcOhqpSL64K4/0WCtbKFLOdzD/44cJ4k9opA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.9.tgz", + "integrity": "sha512-5WNI1DaMtxQ7t7B6xa572XMXpHAaI/9Hnhk8lcxF4zVN4xstUgTlvuGDorBguKEnZO70qwEcLpfifMLoxiPqHQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.9.tgz", + "integrity": "sha512-IDrddSmpSv51ftWslJMvl3Q2ZT98fUSL2/rlUXuVqRXHCs5EUF1/f+jbjF5+NG9UffUDMCiTyh8iec7u8RlTLg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.9", + "resolved": 
"https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.9.tgz", + "integrity": "sha512-I853iMZ1hWZdNllhVZKm34f4wErd4lMyeV7BLzEExGEIZYsOzqDWDf+y082izYUE8gtJnYHdeDpN/6tUdwvfiw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.9.tgz", + "integrity": "sha512-XIpIDMAjOELi/9PB30vEbVMs3GV1v2zkkPnuyRRURbhqjyzIINwj+nbQATh4H9GxUgH1kFsEyQMxwiLFKUS6Rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.9.tgz", + "integrity": "sha512-jhHfBzjYTA1IQu8VyrjCX4ApJDnH+ez+IYVEoJHeqJm9VhG9Dh2BYaJritkYK3vMaXrf7Ogr/0MQ8/MeIefsPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.9.tgz", + "integrity": "sha512-z93DmbnY6fX9+KdD4Ue/H6sYs+bhFQJNCPZsi4XWJoYblUqT06MQUdBCpcSfuiN72AbqeBFu5LVQTjfXDE2A6Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.9.tgz", + "integrity": "sha512-mrKX6H/vOyo5v71YfXWJxLVxgy1kyt1MQaD8wZJgJfG4gq4DpQGpgTB74e5yBeQdyMTbgxp0YtNj7NuHN0PoZg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } 
+ }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.9.tgz", + "integrity": "sha512-HBU2Xv78SMgaydBmdor38lg8YDnFKSARg1Q6AT0/y2ezUAKiZvc211RDFHlEZRFNRVhcMamiToo7bDx3VEOYQw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.9.tgz", + "integrity": "sha512-BlB7bIcLT3G26urh5Dmse7fiLmLXnRlopw4s8DalgZ8ef79Jj4aUcYbk90g8iCa2467HX8SAIidbL7gsqXHdRw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.9.tgz", + "integrity": "sha512-e7S3MOJPZGp2QW6AK6+Ly81rC7oOSerQ+P8L0ta4FhVi+/j/v2yZzx5CqqDaWjtPFfYz21Vi1S0auHrap3Ma3A==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.9.tgz", + "integrity": "sha512-Sbe10Bnn0oUAB2AalYztvGcK+o6YFFA/9829PhOCUS9vkJElXGdphz0A3DbMdP8gmKkqPmPcMJmJOrI3VYB1JQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.9.tgz", + "integrity": "sha512-YcM5br0mVyZw2jcQeLIkhWtKPeVfAerES5PvOzaDxVtIyZ2NUBZKNLjC5z3/fUlDgT6w89VsxP2qzNipOaaDyA==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": 
"MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.9.tgz", + "integrity": "sha512-++0HQvasdo20JytyDpFvQtNrEsAgNG2CY1CLMwGXfFTKGBGQT3bOeLSYE2l1fYdvML5KUuwn9Z8L1EWe2tzs1w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.9.tgz", + "integrity": "sha512-uNIBa279Y3fkjV+2cUjx36xkx7eSjb8IvnL01eXUKXez/CBHNRw5ekCGMPM0BcmqBxBcdgUWuUXmVWwm4CH9kg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.9.tgz", + "integrity": "sha512-Mfiphvp3MjC/lctb+7D287Xw1DGzqJPb/J2aHHcHxflUo+8tmN/6d4k6I2yFR7BVo5/g7x2Monq4+Yew0EHRIA==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.9.tgz", + "integrity": "sha512-iSwByxzRe48YVkmpbgoxVzn76BXjlYFXC7NvLYq+b+kDjyyk30J0JY47DIn8z1MO3K0oSl9fZoRmZPQI4Hklzg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.9.tgz", + "integrity": 
"sha512-9jNJl6FqaUG+COdQMjSCGW4QiMHH88xWbvZ+kRVblZsWrkXlABuGdFJ1E9L7HK+T0Yqd4akKNa/lO0+jDxQD4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.9.tgz", + "integrity": "sha512-RLLdkflmqRG8KanPGOU7Rpg829ZHu8nFy5Pqdi9U01VYtG9Y0zOG6Vr2z4/S+/3zIyOxiK6cCeYNWOFR9QP87g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.9.tgz", + "integrity": "sha512-YaFBlPGeDasft5IIM+CQAhJAqS3St3nJzDEgsgFixcfZeyGPCd6eJBWzke5piZuZ7CtL656eOSYKk4Ls2C0FRQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.9.tgz", + "integrity": "sha512-1MkgTCuvMGWuqVtAvkpkXFmtL8XhWy+j4jaSO2wxfJtilVCi0ZE37b8uOdMItIHz4I6z1bWWtEX4CJwcKYLcuA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.9.tgz", + "integrity": "sha512-4Xd0xNiMVXKh6Fa7HEJQbrpP3m3DDn43jKxMjxLLRjWnRsfxjORYJlXPO4JNcXtOyfajXorRKY9NkOpTHptErg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.9", + 
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.9.tgz", + "integrity": "sha512-WjH4s6hzo00nNezhp3wFIAfmGZ8U7KtrJNlFMRKxiI9mxEK1scOMAaa9i4crUtu+tBr+0IN6JCuAcSBJZfnphw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.9.tgz", + "integrity": "sha512-mGFrVJHmZiRqmP8xFOc6b84/7xa5y5YvR1x8djzXpJBSv/UsNK6aqec+6JDjConTgvvQefdGhFDAs2DLAds6gQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.9.tgz", + "integrity": "sha512-b33gLVU2k11nVx1OhX3C8QQP6UHQK4ZtN56oFWvVXvz2VkDoe6fbG8TOgHFxEvqeqohmRnIHe5A1+HADk4OQww==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.9.tgz", + "integrity": "sha512-PPOl1mi6lpLNQxnGoyAfschAodRFYXJ+9fs6WHXz7CSWKbOqiMZsubC+BQsVKuul+3vKLuwTHsS2c2y9EoKwxQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@posthog/core": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@posthog/core/-/core-1.0.0.tgz", + "integrity": "sha512-gquQld+duT9DdzLIFoHZkUMW0DZOTSLCtSjuuC/zKFz65Qecbz9p37DHBJMkw0dCuB8Mgh2GtH8Ag3PznJrP3g==", + "license": "MIT" + }, + "node_modules/@trycua/computer": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@trycua/computer/-/computer-0.1.3.tgz", + 
"integrity": "sha512-RTDgULV6wQJuTsiwhei9aQO6YQSM1TBQqOCDUPHUbTIjtRqzMvMdwtcKAKxZZptzJcBX14bWtbucY65Wu6IEFg==", + "license": "MIT", + "dependencies": { + "@trycua/core": "^0.1.2", + "pino": "^9.7.0", + "ws": "^8.18.0" + } + }, + "node_modules/@trycua/core": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@trycua/core/-/core-0.1.3.tgz", + "integrity": "sha512-sv7BEajJyZ+JNxrOdhao4qCOtRrh+S0XYf64ehAT4UAhLC73Kep06bGa/Uel0Ow5xGXXrg0aiVBL7zO9+/w4/Q==", + "license": "MIT", + "dependencies": { + "@types/uuid": "^10.0.0", + "pino": "^9.7.0", + "posthog-node": "^5.1.1", + "uuid": "^11.1.0" + } + }, + "node_modules/@types/node": { + "version": "22.17.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.17.2.tgz", + "integrity": "sha512-gL6z5N9Jm9mhY+U2KXZpteb+09zyffliRkZyZOHODGATyC5B1Jt/7TzuuiLkFsSUMLbS1OLmlj/E+/3KF4Q/4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "license": "MIT" + }, + "node_modules/atomic-sleep": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/atomic-sleep/-/atomic-sleep-1.0.0.tgz", + "integrity": "sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==", + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/esbuild": { + "version": "0.25.9", + "resolved": 
"https://registry.npmjs.org/esbuild/-/esbuild-0.25.9.tgz", + "integrity": "sha512-CRbODhYyQx3qp7ZEwzxOk4JBqmD/seJrzPa/cGjY1VtIn5E09Oi9/dB4JwctnfZ8Q8iT7rioVv5k/FNT/uf54g==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.9", + "@esbuild/android-arm": "0.25.9", + "@esbuild/android-arm64": "0.25.9", + "@esbuild/android-x64": "0.25.9", + "@esbuild/darwin-arm64": "0.25.9", + "@esbuild/darwin-x64": "0.25.9", + "@esbuild/freebsd-arm64": "0.25.9", + "@esbuild/freebsd-x64": "0.25.9", + "@esbuild/linux-arm": "0.25.9", + "@esbuild/linux-arm64": "0.25.9", + "@esbuild/linux-ia32": "0.25.9", + "@esbuild/linux-loong64": "0.25.9", + "@esbuild/linux-mips64el": "0.25.9", + "@esbuild/linux-ppc64": "0.25.9", + "@esbuild/linux-riscv64": "0.25.9", + "@esbuild/linux-s390x": "0.25.9", + "@esbuild/linux-x64": "0.25.9", + "@esbuild/netbsd-arm64": "0.25.9", + "@esbuild/netbsd-x64": "0.25.9", + "@esbuild/openbsd-arm64": "0.25.9", + "@esbuild/openbsd-x64": "0.25.9", + "@esbuild/openharmony-arm64": "0.25.9", + "@esbuild/sunos-x64": "0.25.9", + "@esbuild/win32-arm64": "0.25.9", + "@esbuild/win32-ia32": "0.25.9", + "@esbuild/win32-x64": "0.25.9" + } + }, + "node_modules/fast-redact": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/fast-redact/-/fast-redact-3.5.0.tgz", + "integrity": "sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || 
>=11.0.0" + } + }, + "node_modules/get-tsconfig": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.1.tgz", + "integrity": "sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/on-exit-leak-free": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.2.tgz", + "integrity": "sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/openai": { + "version": "5.13.1", + "resolved": "https://registry.npmjs.org/openai/-/openai-5.13.1.tgz", + "integrity": "sha512-Jty97Apw40znKSlXZL2YDap1U2eN9NfXbqm/Rj1rExeOLEnhwezpKQ+v43kIqojavUgm30SR3iuvGlNEBR+AFg==", + "license": "Apache-2.0", + "bin": { + "openai": "bin/cli" + }, + "peerDependencies": { + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + }, + "zod": { + "optional": true + } + } + }, + "node_modules/pino": { + "version": "9.9.0", + "resolved": "https://registry.npmjs.org/pino/-/pino-9.9.0.tgz", + "integrity": "sha512-zxsRIQG9HzG+jEljmvmZupOMDUQ0Jpj0yAgE28jQvvrdYTlEaiGwelJpdndMl/MBuRr70heIj83QyqJUWaU8mQ==", + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0", + "fast-redact": "^3.1.1", + "on-exit-leak-free": "^2.1.0", + "pino-abstract-transport": "^2.0.0", + "pino-std-serializers": "^7.0.0", + "process-warning": "^5.0.0", + "quick-format-unescaped": "^4.0.3", + "real-require": "^0.2.0", + "safe-stable-stringify": "^2.3.1", + "sonic-boom": "^4.0.1", + "thread-stream": "^3.0.0" + }, + "bin": { + "pino": "bin.js" + } + }, + "node_modules/pino-abstract-transport": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/pino-abstract-transport/-/pino-abstract-transport-2.0.0.tgz", + "integrity": "sha512-F63x5tizV6WCh4R6RHyi2Ml+M70DNRXt/+HANowMflpgGFMAym/VKm6G7ZOQRjqN7XbGxK1Lg9t6ZrtzOaivMw==", + "license": "MIT", + "dependencies": { + "split2": "^4.0.0" + } + }, + "node_modules/pino-std-serializers": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pino-std-serializers/-/pino-std-serializers-7.0.0.tgz", + "integrity": "sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA==", + "license": "MIT" + }, + "node_modules/posthog-node": { + "version": "5.7.0", + "resolved": "https://registry.npmjs.org/posthog-node/-/posthog-node-5.7.0.tgz", + "integrity": "sha512-6J1AIZWtbr2lEbZOO2AzO/h1FPJjUZM4KWcdaL2UQw7FY8J7VNaH3NiaRockASFmglpID7zEY25gV/YwCtuXjg==", + "license": "MIT", + "dependencies": { + "@posthog/core": "1.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/process-warning": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/process-warning/-/process-warning-5.0.0.tgz", + "integrity": "sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "MIT" + }, + "node_modules/quick-format-unescaped": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz", + "integrity": "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==", + "license": "MIT" + }, + "node_modules/real-require": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/real-require/-/real-require-0.2.0.tgz", + "integrity": "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==", + "license": "MIT", + "engines": { + "node": 
">= 12.13.0" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/sonic-boom": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.2.0.tgz", + "integrity": "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww==", + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0" + } + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "license": "ISC", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/thread-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.1.0.tgz", + "integrity": "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==", + "license": "MIT", + "dependencies": { + "real-require": "^0.2.0" + } + }, + "node_modules/tsx": { + "version": "4.20.4", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.20.4.tgz", + "integrity": "sha512-yyxBKfORQ7LuRt/BQKBXrpcq59ZvSW0XxwfjAt3w2/8PmdxaFzijtMhTawprSHhpzeM5BgU2hXHG3lklIERZXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": 
"~0.25.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/typescript": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + } + } +} diff --git a/examples/computer-example-ts/src/index.ts b/examples/computer-example-ts/src/index.ts index 1077e088..d58a9843 100644 --- a/examples/computer-example-ts/src/index.ts +++ b/examples/computer-example-ts/src/index.ts @@ 
-4,13 +4,13 @@ import { executeAction } from "./helpers"; import "dotenv/config"; -const openai = new OpenAI({ apiKey: process.env.OPENAI_KEY }); +const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY }); const COMPUTER_USE_PROMPT = "Open firefox and go to trycua.com"; // Initialize the Computer Connection const computer = new Computer({ - apiKey: process.env.CUA_KEY!, + apiKey: process.env.CUA_API_KEY!, name: process.env.CUA_CONTAINER_NAME!, osType: OSType.LINUX, }); diff --git a/libs/kasm/README.md b/libs/kasm/README.md index 6a504507..2a383535 100644 --- a/libs/kasm/README.md +++ b/libs/kasm/README.md @@ -21,18 +21,6 @@ docker build -t cua-ubuntu:latest . ### Pushing to Registry -To push the container to a Docker registry: - -```bash -# Tag for your registry (replace with your registry URL) -docker tag cua-ubuntu:latest your-registry.com/cua-ubuntu:latest - -# Push to registry -docker push your-registry.com/cua-ubuntu:latest -``` - -For Docker Hub: - ```bash # Tag for Docker Hub (replace 'trycua' with your Docker Hub username) docker tag cua-ubuntu:latest trycua/cua-ubuntu:latest diff --git a/libs/lume/docs/Development.md b/libs/lume/Development.md similarity index 76% rename from libs/lume/docs/Development.md rename to libs/lume/Development.md index cbaa4df5..0ddf8c5e 100644 --- a/libs/lume/docs/Development.md +++ b/libs/lume/Development.md @@ -10,6 +10,14 @@ Lume development requires: - macOS Sequoia 15.2 or higher - (Optional) VS Code with Swift extension +If you're working on Lume in the context of the Cua monorepo, we recommend using the dedicated VS Code workspace configuration: + +```bash +# Open VS Code workspace from the root of the monorepo +code .vscode/lume.code-workspace +``` +This workspace is preconfigured with Swift language support, build tasks, and debug configurations. + ## Setting Up the Repository Locally 1. 
**Fork the Repository**: Create your own fork of lume diff --git a/libs/lume/README.md b/libs/lume/README.md index ac4257e0..c90c250a 100644 --- a/libs/lume/README.md +++ b/libs/lume/README.md @@ -23,174 +23,42 @@ lume cli
- ```bash lume run macos-sequoia-vanilla:latest ``` -## Development Environment +## Quickstart -If you're working on Lume in the context of the CUA monorepo, we recommend using the dedicated VS Code workspace configuration: - -```bash -# Open VS Code workspace from the root of the monorepo -code .vscode/lume.code-workspace -``` -This workspace is preconfigured with Swift language support, build tasks, and debug configurations. - -## Usage - -```bash -lume - -Commands: - lume create Create a new macOS or Linux VM - lume run Run a VM - lume ls List all VMs - lume get Get detailed information about a VM - lume set Modify VM configuration - lume stop Stop a running VM - lume delete Delete a VM - lume pull Pull a macOS image from container registry - lume push Push a VM image to a container registry - lume clone Clone an existing VM - lume config Get or set lume configuration - lume images List available macOS images in local cache - lume ipsw Get the latest macOS restore image URL - lume prune Remove cached images - lume serve Start the API server - -Options: - --help Show help [boolean] - --version Show version number [boolean] - -Command Options: - create: - --os Operating system to install (macOS or linux, default: macOS) - --cpu Number of CPU cores (default: 4) - --memory Memory size, e.g., 8GB (default: 4GB) - --disk-size Disk size, e.g., 50GB (default: 40GB) - --display Display resolution (default: 1024x768) - --ipsw Path to IPSW file or 'latest' for macOS VMs - --storage VM storage location to use - - run: - --no-display Do not start the VNC client app - --shared-dir Share directory with VM (format: path[:ro|rw]) - --mount For Linux VMs only, attach a read-only disk image - --registry Container registry URL (default: ghcr.io) - --organization Organization to pull from (default: trycua) - --vnc-port Port to use for the VNC server (default: 0 for auto-assign) - --recovery-mode For MacOS VMs only, start VM in recovery mode (default: false) - --storage VM storage 
location to use - - set: - --cpu New number of CPU cores (e.g., 4) - --memory New memory size (e.g., 8192MB or 8GB) - --disk-size New disk size (e.g., 40960MB or 40GB) - --display New display resolution in format WIDTHxHEIGHT (e.g., 1024x768) - --storage VM storage location to use - - delete: - --force Force deletion without confirmation - --storage VM storage location to use - - pull: - --registry Container registry URL (default: ghcr.io) - --organization Organization to pull from (default: trycua) - --storage VM storage location to use - - push: - --additional-tags Additional tags to push the same image to - --registry Container registry URL (default: ghcr.io) - --organization Organization/user to push to (default: trycua) - --storage VM storage location to use - --chunk-size-mb Chunk size for disk image upload in MB (default: 512) - --verbose Enable verbose logging - --dry-run Prepare files and show plan without uploading - --reassemble Verify integrity by reassembling chunks (requires --dry-run) - - get: - -f, --format Output format (json|text) - --storage VM storage location to use - - stop: - --storage VM storage location to use - - clone: - --source-storage Source VM storage location - --dest-storage Destination VM storage location - - config: - get Get current configuration - storage Manage VM storage locations - add Add a new VM storage location - remove Remove a VM storage location - list List all VM storage locations - default Set the default VM storage location - cache Manage cache settings - get Get current cache directory - set Set cache directory - caching Manage image caching settings - get Show current caching status - set Enable or disable image caching - - serve: - --port Port to listen on (default: 7777) -``` - -## Install - -Install with a single command: +Install and run a prebuilt macOS VM in two commands: ```bash +# Install Lume /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" +# 
Pull & start a macOS image +lume run macos-sequoia-vanilla:latest ``` -By default, Lume is installed as a background service that starts automatically on login. If you prefer to start the Lume API service manually when needed, you can use the `--no-background-service` option: + +All prebuilt images use the default password `lume`. Change this immediately after your first login using the `passwd` command. + -```bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh) --no-background-service" -``` +**System Requirements**: +- Apple Silicon Mac (M1, M2, M3, etc.) +- macOS 13.0 or later +- At least 8GB of RAM (16GB recommended) +- At least 50GB of free disk space -**Note:** With this option, you'll need to manually start the Lume API service by running `lume serve` in your terminal whenever you need to use tools or libraries that rely on the Lume API (such as the Computer-Use Agent). +## Development -You can also download the `lume.pkg.tar.gz` archive from the [latest release](https://github.com/trycua/lume/releases), extract it, and install the package manually. - -## Prebuilt Images - -Pre-built images are available in the registry [ghcr.io/trycua](https://github.com/orgs/trycua/packages). - -**Important Note (v0.2.0+):** Images are being re-uploaded with sparse file system optimizations enabled, resulting in significantly lower actual disk usage. Older images (without the `-sparse` suffix) are now **deprecated**. The last version of `lume` fully supporting the non-sparse images was `v0.1.x`. Starting from `v0.2.0`, lume will automatically pull images optimized with sparse file system support. - -These images come with an SSH server pre-configured and auto-login enabled. - -For the security of your VM, change the default password `lume` immediately after your first login. 
- -| Image | Tag | Description | Logical Size | -|-------|------------|-------------|------| -| `macos-sequoia-vanilla` | `latest`, `15.2` | macOS Sequoia 15.2 image | 20GB | -| `macos-sequoia-xcode` | `latest`, `15.2` | macOS Sequoia 15.2 image with Xcode command line tools | 22GB | -| `macos-sequoia-cua` | `latest`, `15.3` | macOS Sequoia 15.3 image compatible with the Computer interface | 24GB | -| `ubuntu-noble-vanilla` | `latest`, `24.04.1` | [Ubuntu Server for ARM 24.04.1 LTS](https://ubuntu.com/download/server/arm) with Ubuntu Desktop | 20GB | - -For additional disk space, resize the VM disk after pulling the image using the `lume set --disk-size ` command. Note that the actual disk space used by sparse images will be much lower than the logical size listed. - -## Local API Server - -`lume` exposes a local HTTP API server that listens on `http://localhost:7777/lume`, enabling automated management of VMs. - -```bash -lume serve -``` - -For detailed API documentation, please refer to [API Reference](docs/API-Reference.md). +To get set up with Lume for development, read [these instructions](Development.md). ## Docs -- [API Reference](docs/API-Reference.md) -- [Development](docs/Development.md) -- [FAQ](docs/FAQ.md) +- [Installation](https://trycua.com/docs/libraries/lume/installation) +- [Prebuilt Images](https://trycua.com/docs/libraries/lume/prebuilt-images) +- [CLI Reference](https://trycua.com/docs/libraries/lume/cli-reference) +- [HTTP API](https://trycua.com/docs/libraries/lume/http-api) +- [FAQ](https://trycua.com/docs/libraries/lume/faq) ## Contributing diff --git a/libs/lume/docs/API-Reference.md b/libs/lume/docs/API-Reference.md deleted file mode 100644 index 5af09cdf..00000000 --- a/libs/lume/docs/API-Reference.md +++ /dev/null @@ -1,387 +0,0 @@ -## API Reference - -
-Create VM - POST /vms - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "name": "lume_vm", - "os": "macOS", - "cpu": 2, - "memory": "4GB", - "diskSize": "64GB", - "display": "1024x768", - "ipsw": "latest", - "storage": "ssd" - }' \ - http://localhost:7777/lume/vms -``` -
- -
-Run VM - POST /vms/:name/run - -```bash -# Basic run -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - http://localhost:7777/lume/vms/my-vm-name/run - -# Run with VNC client started and shared directory -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "noDisplay": false, - "sharedDirectories": [ - { - "hostPath": "~/Projects", - "readOnly": false - } - ], - "recoveryMode": false, - "storage": "ssd" - }' \ - http://localhost:7777/lume/vms/lume_vm/run -``` -
- -
-List VMs - GET /vms - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/vms -``` -``` -[ - { - "name": "my-vm", - "state": "stopped", - "os": "macOS", - "cpu": 2, - "memory": "4GB", - "diskSize": "64GB" - }, - { - "name": "my-vm-2", - "state": "stopped", - "os": "linux", - "cpu": 2, - "memory": "4GB", - "diskSize": "64GB" - } -] -``` -
- -
-Get VM Details - GET /vms/:name - -```bash -# Basic get -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/vms/lume_vm - -# Get with storage location specified -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/vms/lume_vm?storage=ssd -``` -``` -{ - "name": "lume_vm", - "state": "running", - "os": "macOS", - "cpu": 2, - "memory": "4GB", - "diskSize": "64GB" -} -``` -
- -
-Update VM Settings - PATCH /vms/:name - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X PATCH \ - -H "Content-Type: application/json" \ - -d '{ - "cpu": 4, - "memory": "8GB", - "diskSize": "128GB", - "storage": "ssd" - }' \ - http://localhost:7777/lume/vms/my-vm-name -``` -
- -
-Stop VM - POST /vms/:name/stop - -```bash -# Basic stop -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - http://localhost:7777/lume/vms/my-vm-name/stop - -# Stop with storage location specified -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - http://localhost:7777/lume/vms/my-vm-name/stop?storage=ssd -``` -
- -
-Delete VM - DELETE /vms/:name - -```bash -# Basic delete -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X DELETE \ - http://localhost:7777/lume/vms/my-vm-name - -# Delete with storage location specified -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X DELETE \ - http://localhost:7777/lume/vms/my-vm-name?storage=ssd -``` -
- -
-Pull Image - POST /pull - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "image": "macos-sequoia-vanilla:latest", - "name": "my-vm-name", - "registry": "ghcr.io", - "organization": "trycua", - "storage": "ssd" - }' \ - http://localhost:7777/lume/pull -``` - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "image": "macos-sequoia-vanilla:15.2", - "name": "macos-sequoia-vanilla" - }' \ - http://localhost:7777/lume/pull -``` -
- -
-Push Image (Async) - POST /vms/push - -```bash -# Push VM 'my-local-vm' to 'my-org/my-image:latest' and 'my-org/my-image:v1' -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "name": "my-local-vm", - "imageName": "my-image", - "tags": ["latest", "v1"], - "organization": "my-org", - "registry": "ghcr.io", - "chunkSizeMb": 512, - "storage": null - }' \ - http://localhost:7777/lume/vms/push -``` - -**Response (202 Accepted):** - -```json -{ - "message": "Push initiated in background", - "name": "my-local-vm", - "imageName": "my-image", - "tags": [ - "latest", - "v1" - ] -} -``` -
- -
-Clone VM - POST /vms/:name/clone - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "name": "source-vm", - "newName": "cloned-vm", - "sourceLocation": "default", - "destLocation": "ssd" - }' \ - http://localhost:7777/lume/vms/clone -``` -
- -
-Get Latest IPSW URL - GET /ipsw - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/ipsw -``` -
- -
-List Images - GET /images - -```bash -# List images with default organization (trycua) -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/images -``` - -```json -{ - "local": [ - "macos-sequoia-xcode:latest", - "macos-sequoia-vanilla:latest" - ] -} -``` -
- -
-Prune Images - POST /lume/prune - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - http://localhost:7777/lume/prune -``` -
- -
-Get Configuration - GET /lume/config - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/config -``` - -```json -{ - "homeDirectory": "~/.lume", - "cacheDirectory": "~/.lume/cache", - "cachingEnabled": true -} -``` -
- -
-Update Configuration - POST /lume/config - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "homeDirectory": "~/custom/lume", - "cacheDirectory": "~/custom/lume/cache", - "cachingEnabled": true - }' \ - http://localhost:7777/lume/config -``` -
- -
-Get VM Storage Locations - GET /lume/config/locations - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - http://localhost:7777/lume/config/locations -``` - -```json -[ - { - "name": "default", - "path": "~/.lume/vms", - "isDefault": true - }, - { - "name": "ssd", - "path": "/Volumes/SSD/lume/vms", - "isDefault": false - } -] -``` -
- -
-Add VM Storage Location - POST /lume/config/locations - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{ - "name": "ssd", - "path": "/Volumes/SSD/lume/vms" - }' \ - http://localhost:7777/lume/config/locations -``` -
- -
-Remove VM Storage Location - DELETE /lume/config/locations/:name - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X DELETE \ - http://localhost:7777/lume/config/locations/ssd -``` -
- -
-Set Default VM Storage Location - POST /lume/config/locations/default/:name - -```bash -curl --connect-timeout 6000 \ - --max-time 5000 \ - -X POST \ - http://localhost:7777/lume/config/locations/default/ssd -``` -
diff --git a/libs/lumier/README.md b/libs/lumier/README.md index 287a6e96..92cfc559 100644 --- a/libs/lumier/README.md +++ b/libs/lumier/README.md @@ -39,19 +39,6 @@ Before using Lumier, make sure you have: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/lume/scripts/install.sh)" ``` -After installation, Lume runs as a background service and listens on port 7777. This service allows Lumier to create and manage virtual machines. If port 7777 is already in use on your system, you can specify a different port with the `--port` option when running the `install.sh` script. - -## How It Works - -> **Note:** We're using Docker primarily as a convenient delivery mechanism, not as an isolation layer. Unlike traditional Docker containers, Lumier leverages the Apple Virtualization Framework (Apple Vz) through the `lume` CLI to create true virtual machines. - -Here's what's happening behind the scenes: - -1. The Docker container provides a consistent environment to run the Lumier interface -2. Lumier connects to the Lume service running on your host Mac -3. Lume uses Apple's Virtualization Framework to create a true macOS virtual machine -4. The VM runs with hardware acceleration using your Mac's native virtualization capabilities - ## Getting Started ```bash @@ -68,191 +55,14 @@ docker run -it --rm \ After running the command above, you can access your macOS VM through a web browser (e.g., http://localhost:8006). -> **Note:** With the basic setup above, your VM will be reset when you stop the container (ephemeral mode). This means any changes you make inside the macOS VM will be lost. See the section below for how to save your VM state. +> **Note:** With the basic setup above, your VM will be reset when you stop the container (ephemeral mode). This means any changes you make inside the macOS VM will be lost. See [the documentation](https://trycua.com/docs/libraries/lumier/docker) for how to save your VM state. 
-## Saving Your VM State +## Docs -To save your VM state between sessions (so your changes persist when you stop and restart the container), you'll need to set up a storage location: - -```bash -# First, create a storage directory if it doesn't exist -mkdir -p storage - -# Then run the container with persistent storage -docker run -it --rm \ - --name lumier-vm \ - -p 8006:8006 \ - -v $(pwd)/storage:/storage \ - -e VM_NAME=lumier-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - -e HOST_STORAGE_PATH=$(pwd)/storage \ - trycua/lumier:latest -``` - -This command creates a connection between a folder on your Mac (`$(pwd)/storage`) and a folder inside the Docker container (`/storage`). The `-v` flag (volume mount) and the `HOST_STORAGE_PATH` variable work together to ensure your VM data is saved on your host Mac. - -## Sharing Files with Your VM - -To share files between your Mac and the virtual machine, you can set up a shared folder: - -```bash -# Create both storage and shared folders -mkdir -p storage shared - -# Run with both persistent storage and a shared folder -docker run -it --rm \ - --name lumier-vm \ - -p 8006:8006 \ - -v $(pwd)/storage:/storage \ - -v $(pwd)/shared:/shared \ - -e VM_NAME=lumier-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - -e HOST_STORAGE_PATH=$(pwd)/storage \ - -e HOST_SHARED_PATH=$(pwd)/shared \ - trycua/lumier:latest -``` - -With this setup, any files you place in the `shared` folder on your Mac will be accessible from within the macOS VM, and vice versa. - -## Automating VM Startup with on-logon.sh - -You can automatically run scripts when the VM starts up by placing an `on-logon.sh` script in the shared folder's lifecycle directory. This is useful for setting up your VM environment each time it starts. 
- -```bash -# Create the lifecycle directory in your shared folder -mkdir -p shared/lifecycle - -# Create a sample on-logon.sh script -cat > shared/lifecycle/on-logon.sh << 'EOF' -#!/usr/bin/env bash - -# Create a file on the desktop -echo "Hello from Lumier!" > /Users/lume/Desktop/hello_lume.txt - -# You can add more commands to execute at VM startup -# For example: -# - Configure environment variables -# - Start applications -# - Mount network drives -# - Set up development environments -EOF - -# Make the script executable -chmod +x shared/lifecycle/on-logon.sh -``` - -The script will be automatically executed when the VM starts up. It runs in the VM context and has access to: - -- The `/Users/lume` user directory (home directory in the VM) -- The shared folder at `/Volumes/My Shared Files` inside the VM -- Any resources available to the VM - -This feature enables automation of VM setup without modifying the base VM image. - -## Using Docker Compose - -You can also use Docker Compose to run Lumier with a simple configuration file. 
Create a `docker-compose.yml` file with the following content: - -```yaml -version: '3' - -services: - lumier: - image: trycua/lumier:latest - container_name: lumier-vm - restart: unless-stopped - ports: - - "8006:8006" # Port for VNC access - volumes: - - ./storage:/storage # VM persistent storage - - ./shared:/shared # Shared folder accessible in the VM - environment: - - VM_NAME=lumier-vm - - VERSION=ghcr.io/trycua/macos-sequoia-cua:latest - - CPU_CORES=4 - - RAM_SIZE=8192 - - HOST_STORAGE_PATH=${PWD}/storage - - HOST_SHARED_PATH=${PWD}/shared - stop_signal: SIGINT - stop_grace_period: 2m -``` - -Then run Lumier using: - -```bash -# First create the required directories -mkdir -p storage shared - -# Start the container -docker-compose up -d - -# View the logs -docker-compose logs -f - -# Stop the container when done -docker-compose down -``` - -## Building and Customizing Lumier - -If you want to customize the Lumier container or build it from source, you can follow these steps: - -```bash -# 1. Navigate to the Lumier directory -cd libs/lumier - -# 2. Build the Docker image locally -docker build -t lumier-custom:latest . - -# 3. Run your custom build -docker run -it --rm \ - --name lumier-vm \ - -p 8006:8006 \ - -e VM_NAME=lumier-vm \ - -e VERSION=ghcr.io/trycua/macos-sequoia-cua:latest \ - -e CPU_CORES=4 \ - -e RAM_SIZE=8192 \ - lumier-custom:latest -``` - -### Customization Options - -The Dockerfile provides several customization points: - -1. **Base image**: The container uses Debian Bullseye Slim as the base. You can modify this if needed. -2. **Installed packages**: You can add or remove packages in the apt-get install list. -3. **Hooks**: Check the `/run/hooks/` directory for scripts that run at specific points during VM lifecycle. -4. **Configuration**: Review `/run/config/constants.sh` for default settings. 
- -After making your modifications, you can build and push your custom image to your own Docker Hub repository: - -```bash -# Build with a custom tag -docker build -t yourusername/lumier:custom . - -# Push to Docker Hub (after docker login) -docker push yourusername/lumier:custom -``` - -## Configuration Options - -When running Lumier, you'll need to configure a few things: - -- **Port forwarding** (`-p 8006:8006`): Makes the VM's VNC interface accessible in your browser. If port 8006 is already in use, you can use a different port like `-p 8007:8006`. - -- **Environment variables** (`-e`): Configure your VM settings: - - `VM_NAME`: A name for your virtual machine - - `VERSION`: The macOS image to use - - `CPU_CORES`: Number of CPU cores to allocate - - `RAM_SIZE`: Memory in MB to allocate - - `HOST_STORAGE_PATH`: Path to save VM state (when using persistent storage) - - `HOST_SHARED_PATH`: Path to the shared folder (optional) - -- **Background service**: The `lume serve` service should be running on your host (starts automatically when you install Lume using the `install.sh` script above). 
+- [Installation](https://trycua.com/docs/libraries/lumier/installation) +- [Docker](https://trycua.com/docs/libraries/lumier/docker) +- [Docker Compose](https://trycua.com/docs/libraries/lumier/docker-compose) +- [Building Lumier](https://trycua.com/docs/libraries/lumier/building-lumier) ## Credits diff --git a/libs/python/agent/README.md b/libs/python/agent/README.md index cec3cd45..ed90076b 100644 --- a/libs/python/agent/README.md +++ b/libs/python/agent/README.md @@ -29,16 +29,6 @@ ```bash pip install "cua-agent[all]" - -# or install specific providers -pip install "cua-agent[openai]" # OpenAI computer-use-preview support -pip install "cua-agent[anthropic]" # Anthropic Claude support -pip install "cua-agent[omni]" # Omniparser + any LLM support -pip install "cua-agent[uitars]" # UI-TARS -pip install "cua-agent[uitars-mlx]" # UI-TARS + MLX support -pip install "cua-agent[uitars-hf]" # UI-TARS + Huggingface support -pip install "cua-agent[glm45v-hf]" # GLM-4.5V + Huggingface support -pip install "cua-agent[ui]" # Gradio UI support ``` ## Quick Start @@ -79,303 +69,18 @@ if __name__ == "__main__": asyncio.run(main()) ``` -## Supported Models +## Docs -### Anthropic Claude (Computer Use API) -```python -model="anthropic/claude-3-5-sonnet-20241022" -model="anthropic/claude-3-7-sonnet-20250219" -model="anthropic/claude-opus-4-20250514" -model="anthropic/claude-sonnet-4-20250514" -``` - -### OpenAI Computer Use Preview -```python -model="openai/computer-use-preview" -``` - -### UI-TARS (Local or Huggingface Inference) -```python -model="huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B" -model="ollama_chat/0000/ui-tars-1.5-7b" -``` - -### Omniparser + Any LLM -```python -model="omniparser+ollama_chat/mistral-small3.2" -model="omniparser+vertex_ai/gemini-pro" -model="omniparser+anthropic/claude-3-5-sonnet-20241022" -model="omniparser+openai/gpt-4o" -``` - -## Custom Tools - -Define custom tools using decorated functions: - -```python -from computer.helpers import 
sandboxed - -@sandboxed() -def read_file(location: str) -> str: - """Read contents of a file - - Parameters - ---------- - location : str - Path to the file to read - - Returns - ------- - str - Contents of the file or error message - """ - try: - with open(location, 'r') as f: - return f.read() - except Exception as e: - return f"Error reading file: {str(e)}" - -def calculate(a: int, b: int) -> int: - """Calculate the sum of two integers""" - return a + b - -# Use with agent -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer, read_file, calculate] -) -``` - -## Callbacks System - -agent provides a comprehensive callback system for extending functionality: - -### Built-in Callbacks - -```python -from agent.callbacks import ( - ImageRetentionCallback, - TrajectorySaverCallback, - BudgetManagerCallback, - LoggingCallback -) - -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - tools=[computer], - callbacks=[ - ImageRetentionCallback(only_n_most_recent_images=3), - TrajectorySaverCallback(trajectory_dir="trajectories"), - BudgetManagerCallback(max_budget=10.0, raise_error=True), - LoggingCallback(level=logging.INFO) - ] -) -``` - -### Custom Callbacks - -```python -from agent.callbacks.base import AsyncCallbackHandler - -class CustomCallback(AsyncCallbackHandler): - async def on_llm_start(self, messages): - """Preprocess messages before LLM call""" - # Add custom preprocessing logic - return messages - - async def on_llm_end(self, messages): - """Postprocess messages after LLM call""" - # Add custom postprocessing logic - return messages - - async def on_usage(self, usage): - """Track usage information""" - print(f"Tokens used: {usage.total_tokens}") -``` - -## Budget Management - -Control costs with built-in budget management: - -```python -# Simple budget limit -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - max_trajectory_budget=5.0 # $5 limit -) - -# Advanced budget 
configuration -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - max_trajectory_budget={ - "max_budget": 10.0, - "raise_error": True, # Raise error when exceeded - "reset_after_each_run": False # Persistent across runs - } -) -``` - -## Trajectory Management - -Save and replay agent conversations: - -```python -agent = ComputerAgent( - model="anthropic/claude-3-5-sonnet-20241022", - trajectory_dir="trajectories", # Auto-save trajectories - tools=[computer] -) - -# Trajectories are saved with: -# - Complete conversation history -# - Usage statistics and costs -# - Timestamps and metadata -# - Screenshots and computer actions -``` - -## Configuration Options - -### ComputerAgent Parameters - -- `model`: Model identifier (required) -- `tools`: List of computer objects and decorated functions -- `callbacks`: List of callback handlers for extensibility -- `only_n_most_recent_images`: Limit recent images to prevent context overflow -- `verbosity`: Logging level (logging.INFO, logging.DEBUG, etc.) 
-- `trajectory_dir`: Directory to save conversation trajectories -- `max_retries`: Maximum API call retries (default: 3) -- `screenshot_delay`: Delay between actions and screenshots (default: 0.5s) -- `use_prompt_caching`: Enable prompt caching for supported models -- `max_trajectory_budget`: Budget limit configuration - -### Environment Variables - -```bash -# Computer instance (cloud) -export CUA_CONTAINER_NAME="your-container-name" -export CUA_API_KEY="your-cua-api-key" - -# LLM API keys -export ANTHROPIC_API_KEY="your-anthropic-key" -export OPENAI_API_KEY="your-openai-key" -``` - -## Advanced Usage - -### Streaming Responses - -```python -async for result in agent.run(messages, stream=True): - # Process streaming chunks - for item in result["output"]: - if item["type"] == "message": - print(item["content"][0]["text"], end="", flush=True) - elif item["type"] == "computer_call": - action = item["action"] - print(f"\n[Action: {action['type']}]") -``` - -### Interactive Chat Loop - -```python -history = [] -while True: - user_input = input("> ") - if user_input.lower() in ['quit', 'exit']: - break - - history.append({"role": "user", "content": user_input}) - - async for result in agent.run(history): - history += result["output"] - - # Display assistant responses - for item in result["output"]: - if item["type"] == "message": - print(item["content"][0]["text"]) -``` - -### Error Handling - -```python -try: - async for result in agent.run(messages): - # Process results - pass -except BudgetExceededException: - print("Budget limit exceeded") -except Exception as e: - print(f"Agent error: {e}") -``` - -## API Reference - -### ComputerAgent.run() - -```python -async def run( - self, - messages: Messages, - stream: bool = False, - **kwargs -) -> AsyncGenerator[Dict[str, Any], None]: - """ - Run the agent with the given messages. 
- - Args: - messages: List of message dictionaries - stream: Whether to stream the response - **kwargs: Additional arguments - - Returns: - AsyncGenerator that yields response chunks - """ -``` - -### Message Format - -```python -messages = [ - { - "role": "user", - "content": "Take a screenshot and describe what you see" - }, - { - "role": "assistant", - "content": "I'll take a screenshot for you." - } -] -``` - -### Response Format - -```python -{ - "output": [ - { - "type": "message", - "role": "assistant", - "content": [{"type": "output_text", "text": "I can see..."}] - }, - { - "type": "computer_call", - "action": {"type": "screenshot"}, - "call_id": "call_123" - }, - { - "type": "computer_call_output", - "call_id": "call_123", - "output": {"image_url": "data:image/png;base64,..."} - } - ], - "usage": { - "prompt_tokens": 150, - "completion_tokens": 75, - "total_tokens": 225, - "response_cost": 0.01, - } -} -``` +- [Agent Loops](https://trycua.com/docs/agent-sdk/agent-loops) +- [Supported Agents](https://trycua.com/docs/agent-sdk/supported-agents) +- [Supported Models](https://trycua.com/docs/agent-sdk/supported-models) +- [Chat History](https://trycua.com/docs/agent-sdk/chat-history) +- [Callbacks](https://trycua.com/docs/agent-sdk/callbacks) +- [Custom Tools](https://trycua.com/docs/agent-sdk/custom-tools) +- [Custom Computer Handlers](https://trycua.com/docs/agent-sdk/custom-computer-handlers) +- [Prompt Caching](https://trycua.com/docs/agent-sdk/prompt-caching) +- [Usage Tracking](https://trycua.com/docs/agent-sdk/usage-tracking) +- [Benchmarks](https://trycua.com/docs/agent-sdk/benchmarks) ## License diff --git a/libs/python/agent/agent/adapters/__init__.py b/libs/python/agent/agent/adapters/__init__.py index 3a5c0301..1f07a9fc 100644 --- a/libs/python/agent/agent/adapters/__init__.py +++ b/libs/python/agent/agent/adapters/__init__.py @@ -4,8 +4,10 @@ Adapters package for agent - Custom LLM adapters for LiteLLM from .huggingfacelocal_adapter import 
HuggingFaceLocalAdapter from .human_adapter import HumanAdapter +from .mlxvlm_adapter import MLXVLMAdapter __all__ = [ "HuggingFaceLocalAdapter", "HumanAdapter", + "MLXVLMAdapter", ] diff --git a/libs/python/agent/agent/adapters/mlxvlm_adapter.py b/libs/python/agent/agent/adapters/mlxvlm_adapter.py new file mode 100644 index 00000000..8255725b --- /dev/null +++ b/libs/python/agent/agent/adapters/mlxvlm_adapter.py @@ -0,0 +1,359 @@ +import asyncio +import functools +import warnings +import io +import base64 +import math +import re +from concurrent.futures import ThreadPoolExecutor +from typing import Iterator, AsyncIterator, Dict, List, Any, Optional, Tuple, cast +from PIL import Image +from litellm.types.utils import GenericStreamingChunk, ModelResponse +from litellm.llms.custom_llm import CustomLLM +from litellm import completion, acompletion + +# Try to import MLX dependencies +try: + import mlx.core as mx + from mlx_vlm import load, generate + from mlx_vlm.prompt_utils import apply_chat_template + from mlx_vlm.utils import load_config + from transformers.tokenization_utils import PreTrainedTokenizer + MLX_AVAILABLE = True +except ImportError: + MLX_AVAILABLE = False + +# Constants for smart_resize +IMAGE_FACTOR = 28 +MIN_PIXELS = 100 * 28 * 28 +MAX_PIXELS = 16384 * 28 * 28 +MAX_RATIO = 200 + +def round_by_factor(number: float, factor: int) -> int: + """Returns the closest integer to 'number' that is divisible by 'factor'.""" + return round(number / factor) * factor + +def ceil_by_factor(number: float, factor: int) -> int: + """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'.""" + return math.ceil(number / factor) * factor + +def floor_by_factor(number: float, factor: int) -> int: + """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'.""" + return math.floor(number / factor) * factor + +def smart_resize( + height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = 
MIN_PIXELS, max_pixels: int = MAX_PIXELS +) -> tuple[int, int]: + """ + Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + 3. The aspect ratio of the image is maintained as closely as possible. + """ + if max(height, width) / min(height, width) > MAX_RATIO: + raise ValueError( + f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}" + ) + h_bar = max(factor, round_by_factor(height, factor)) + w_bar = max(factor, round_by_factor(width, factor)) + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = floor_by_factor(height / beta, factor) + w_bar = floor_by_factor(width / beta, factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = ceil_by_factor(height * beta, factor) + w_bar = ceil_by_factor(width * beta, factor) + return h_bar, w_bar + + +class MLXVLMAdapter(CustomLLM): + """MLX VLM Adapter for running vision-language models locally using MLX.""" + + def __init__(self, **kwargs): + """Initialize the adapter. + + Args: + **kwargs: Additional arguments + """ + super().__init__() + + self.models = {} # Cache for loaded models + self.processors = {} # Cache for loaded processors + self.configs = {} # Cache for loaded configs + self._executor = ThreadPoolExecutor(max_workers=1) # Single thread pool + + def _load_model_and_processor(self, model_name: str): + """Load model and processor if not already cached. + + Args: + model_name: Name of the model to load + + Returns: + Tuple of (model, processor, config) + """ + if not MLX_AVAILABLE: + raise ImportError("MLX VLM dependencies not available. 
Please install mlx-vlm.") + + if model_name not in self.models: + # Load model and processor + model_obj, processor = load( + model_name, + processor_kwargs={"min_pixels": MIN_PIXELS, "max_pixels": MAX_PIXELS} + ) + config = load_config(model_name) + + # Cache them + self.models[model_name] = model_obj + self.processors[model_name] = processor + self.configs[model_name] = config + + return self.models[model_name], self.processors[model_name], self.configs[model_name] + + def _process_coordinates(self, text: str, original_size: Tuple[int, int], model_size: Tuple[int, int]) -> str: + """Process coordinates in box tokens based on image resizing using smart_resize approach. + + Args: + text: Text containing box tokens + original_size: Original image size (width, height) + model_size: Model processed image size (width, height) + + Returns: + Text with processed coordinates + """ + # Find all box tokens + box_pattern = r"<\|box_start\|>\((\d+),\s*(\d+)\)<\|box_end\|>" + + def process_coords(match): + model_x, model_y = int(match.group(1)), int(match.group(2)) + # Scale coordinates from model space to original image space + # Both original_size and model_size are in (width, height) format + new_x = int(model_x * original_size[0] / model_size[0]) # Width + new_y = int(model_y * original_size[1] / model_size[1]) # Height + return f"<|box_start|>({new_x},{new_y})<|box_end|>" + + return re.sub(box_pattern, process_coords, text) + + def _convert_messages(self, messages: List[Dict[str, Any]]) -> Tuple[List[Dict[str, Any]], List[Image.Image], Dict[int, Tuple[int, int]], Dict[int, Tuple[int, int]]]: + """Convert OpenAI format messages to MLX VLM format and extract images. 
+ + Args: + messages: Messages in OpenAI format + + Returns: + Tuple of (processed_messages, images, original_sizes, model_sizes) + """ + processed_messages = [] + images = [] + original_sizes = {} # Track original sizes of images for coordinate mapping + model_sizes = {} # Track model processed sizes + image_index = 0 + + for message in messages: + processed_message = { + "role": message["role"], + "content": [] + } + + content = message.get("content", []) + if isinstance(content, str): + # Simple text content + processed_message["content"] = content + elif isinstance(content, list): + # Multi-modal content + processed_content = [] + for item in content: + if item.get("type") == "text": + processed_content.append({ + "type": "text", + "text": item.get("text", "") + }) + elif item.get("type") == "image_url": + image_url = item.get("image_url", {}).get("url", "") + pil_image = None + + if image_url.startswith("data:image/"): + # Extract base64 data + base64_data = image_url.split(',')[1] + # Convert base64 to PIL Image + image_data = base64.b64decode(base64_data) + pil_image = Image.open(io.BytesIO(image_data)) + else: + # Handle file path or URL + pil_image = Image.open(image_url) + + # Store original image size for coordinate mapping + original_size = pil_image.size + original_sizes[image_index] = original_size + + # Use smart_resize to determine model size + # Note: smart_resize expects (height, width) but PIL gives (width, height) + height, width = original_size[1], original_size[0] + new_height, new_width = smart_resize(height, width) + # Store model size in (width, height) format for consistent coordinate processing + model_sizes[image_index] = (new_width, new_height) + + # Resize the image using the calculated dimensions from smart_resize + resized_image = pil_image.resize((new_width, new_height)) + images.append(resized_image) + + # Add image placeholder to content + processed_content.append({ + "type": "image" + }) + + image_index += 1 + + 
processed_message["content"] = processed_content + + processed_messages.append(processed_message) + + return processed_messages, images, original_sizes, model_sizes + + def _generate(self, **kwargs) -> str: + """Generate response using the local MLX VLM model. + + Args: + **kwargs: Keyword arguments containing messages and model info + + Returns: + Generated text response + """ + messages = kwargs.get('messages', []) + model_name = kwargs.get('model', 'mlx-community/UI-TARS-1.5-7B-4bit') + max_tokens = kwargs.get('max_tokens', 128) + + # Warn about ignored kwargs + ignored_kwargs = set(kwargs.keys()) - {'messages', 'model', 'max_tokens'} + if ignored_kwargs: + warnings.warn(f"Ignoring unsupported kwargs: {ignored_kwargs}") + + # Load model and processor + model, processor, config = self._load_model_and_processor(model_name) + + # Convert messages and extract images + processed_messages, images, original_sizes, model_sizes = self._convert_messages(messages) + + # Process user text input with box coordinates after image processing + # Swap original_size and model_size arguments for inverse transformation + for msg_idx, msg in enumerate(processed_messages): + if msg.get("role") == "user" and isinstance(msg.get("content"), str): + content = msg.get("content", "") + if "<|box_start|>" in content and original_sizes and model_sizes and 0 in original_sizes and 0 in model_sizes: + orig_size = original_sizes[0] + model_size = model_sizes[0] + # Swap arguments to perform inverse transformation for user input + processed_messages[msg_idx]["content"] = self._process_coordinates(content, model_size, orig_size) + + try: + # Format prompt according to model requirements using the processor directly + prompt = processor.apply_chat_template( + processed_messages, + tokenize=False, + add_generation_prompt=True, + return_tensors='pt' + ) + tokenizer = cast(PreTrainedTokenizer, processor) + + # Generate response + text_content, usage = generate( + model, + tokenizer, + str(prompt), + 
images, # type: ignore + verbose=False, + max_tokens=max_tokens + ) + + except Exception as e: + raise RuntimeError(f"Error generating response: {str(e)}") from e + + # Process coordinates in the response back to original image space + if original_sizes and model_sizes and 0 in original_sizes and 0 in model_sizes: + # Get original image size and model size (using the first image) + orig_size = original_sizes[0] + model_size = model_sizes[0] + + # Check if output contains box tokens that need processing + if "<|box_start|>" in text_content: + # Process coordinates from model space back to original image space + text_content = self._process_coordinates(text_content, orig_size, model_size) + + return text_content + + def completion(self, *args, **kwargs) -> ModelResponse: + """Synchronous completion method. + + Returns: + ModelResponse with generated text + """ + generated_text = self._generate(**kwargs) + + result = completion( + model=f"mlx/{kwargs.get('model', 'mlx-community/UI-TARS-1.5-7B-4bit')}", + mock_response=generated_text, + ) + return cast(ModelResponse, result) + + async def acompletion(self, *args, **kwargs) -> ModelResponse: + """Asynchronous completion method. + + Returns: + ModelResponse with generated text + """ + # Run _generate in thread pool to avoid blocking + loop = asyncio.get_event_loop() + generated_text = await loop.run_in_executor( + self._executor, + functools.partial(self._generate, **kwargs) + ) + + result = await acompletion( + model=f"mlx/{kwargs.get('model', 'mlx-community/UI-TARS-1.5-7B-4bit')}", + mock_response=generated_text, + ) + return cast(ModelResponse, result) + + def streaming(self, *args, **kwargs) -> Iterator[GenericStreamingChunk]: + """Synchronous streaming method. 
+ + Returns: + Iterator of GenericStreamingChunk + """ + generated_text = self._generate(**kwargs) + + generic_streaming_chunk: GenericStreamingChunk = { + "finish_reason": "stop", + "index": 0, + "is_finished": True, + "text": generated_text, + "tool_use": None, + "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0}, + } + + yield generic_streaming_chunk + + async def astreaming(self, *args, **kwargs) -> AsyncIterator[GenericStreamingChunk]: + """Asynchronous streaming method. + + Returns: + AsyncIterator of GenericStreamingChunk + """ + # Run _generate in thread pool to avoid blocking + loop = asyncio.get_event_loop() + generated_text = await loop.run_in_executor( + self._executor, + functools.partial(self._generate, **kwargs) + ) + + generic_streaming_chunk: GenericStreamingChunk = { + "finish_reason": "stop", + "index": 0, + "is_finished": True, + "text": generated_text, + "tool_use": None, + "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0}, + } + + yield generic_streaming_chunk \ No newline at end of file diff --git a/libs/python/agent/agent/agent.py b/libs/python/agent/agent/agent.py index 78fc3f45..b796866d 100644 --- a/libs/python/agent/agent/agent.py +++ b/libs/python/agent/agent/agent.py @@ -3,6 +3,7 @@ ComputerAgent - Main agent class that selects and runs agent loops """ import asyncio +from pathlib import Path from typing import Dict, List, Any, Optional, AsyncGenerator, Union, cast, Callable, Set, Tuple from litellm.responses.utils import Usage @@ -22,6 +23,7 @@ import inspect from .adapters import ( HuggingFaceLocalAdapter, HumanAdapter, + MLXVLMAdapter, ) from .callbacks import ( ImageRetentionCallback, @@ -29,6 +31,7 @@ from .callbacks import ( TrajectorySaverCallback, BudgetManagerCallback, TelemetryCallback, + OperatorNormalizerCallback ) from .computers import ( AsyncComputerHandler, @@ -160,7 +163,7 @@ class ComputerAgent: only_n_most_recent_images: Optional[int] = None, callbacks: Optional[List[Any]] 
= None, verbosity: Optional[int] = None, - trajectory_dir: Optional[str] = None, + trajectory_dir: Optional[str | Path | dict] = None, max_retries: Optional[int] = 3, screenshot_delay: Optional[float | int] = 0.5, use_prompt_caching: Optional[bool] = False, @@ -185,7 +188,11 @@ class ComputerAgent: max_trajectory_budget: If set, adds BudgetManagerCallback to track usage costs and stop when budget is exceeded telemetry_enabled: If set, adds TelemetryCallback to track anonymized usage data. Enabled by default. **kwargs: Additional arguments passed to the agent loop - """ + """ + # If the loop is "human/human", we need to prefix a grounding model fallback + if model in ["human/human", "human"]: + model = "openai/computer-use-preview+human/human" + self.model = model self.tools = tools or [] self.custom_loop = custom_loop @@ -201,6 +208,9 @@ class ComputerAgent: # == Add built-in callbacks == + # Prepend operator normalizer callback + self.callbacks.insert(0, OperatorNormalizerCallback()) + # Add telemetry callback if telemetry_enabled is set if self.telemetry_enabled: if isinstance(self.telemetry_enabled, bool): @@ -218,7 +228,10 @@ class ComputerAgent: # Add trajectory saver callback if trajectory_dir is set if self.trajectory_dir: - self.callbacks.append(TrajectorySaverCallback(self.trajectory_dir)) + if isinstance(self.trajectory_dir, dict): + self.callbacks.append(TrajectorySaverCallback(**self.trajectory_dir)) + elif isinstance(self.trajectory_dir, (str, Path)): + self.callbacks.append(TrajectorySaverCallback(str(self.trajectory_dir))) # Add budget manager if max_trajectory_budget is set if max_trajectory_budget: @@ -234,9 +247,11 @@ class ComputerAgent: device="auto" ) human_adapter = HumanAdapter() + mlx_adapter = MLXVLMAdapter() litellm.custom_provider_map = [ {"provider": "huggingface-local", "custom_handler": hf_adapter}, - {"provider": "human", "custom_handler": human_adapter} + {"provider": "human", "custom_handler": human_adapter}, + {"provider": "mlx", 
"custom_handler": mlx_adapter} ] litellm.suppress_debug_info = True diff --git a/libs/python/agent/agent/callbacks/__init__.py b/libs/python/agent/agent/callbacks/__init__.py index ffe34551..e0befcc7 100644 --- a/libs/python/agent/agent/callbacks/__init__.py +++ b/libs/python/agent/agent/callbacks/__init__.py @@ -8,6 +8,7 @@ from .logging import LoggingCallback from .trajectory_saver import TrajectorySaverCallback from .budget_manager import BudgetManagerCallback from .telemetry import TelemetryCallback +from .operator_validator import OperatorNormalizerCallback __all__ = [ "AsyncCallbackHandler", @@ -16,4 +17,5 @@ __all__ = [ "TrajectorySaverCallback", "BudgetManagerCallback", "TelemetryCallback", + "OperatorNormalizerCallback", ] diff --git a/libs/python/agent/agent/callbacks/image_retention.py b/libs/python/agent/agent/callbacks/image_retention.py index d91754b1..320a3f43 100644 --- a/libs/python/agent/agent/callbacks/image_retention.py +++ b/libs/python/agent/agent/callbacks/image_retention.py @@ -50,90 +50,41 @@ class ImageRetentionCallback(AsyncCallbackHandler): """ if self.only_n_most_recent_images is None: return messages - - # First pass: Assign call_id to reasoning items based on the next computer_call - messages_with_call_ids = [] - for i, msg in enumerate(messages): - msg_copy = msg.copy() if isinstance(msg, dict) else msg - - # If this is a reasoning item without a call_id, find the next computer_call - if (msg_copy.get("type") == "reasoning" and - not msg_copy.get("call_id")): - # Look ahead for the next computer_call - for j in range(i + 1, len(messages)): - next_msg = messages[j] - if (next_msg.get("type") == "computer_call" and - next_msg.get("call_id")): - msg_copy["call_id"] = next_msg.get("call_id") - break - - messages_with_call_ids.append(msg_copy) - - # Find all computer_call_output items with images and their call_ids - image_call_ids = [] - for msg in reversed(messages_with_call_ids): # Process in reverse to get most recent first - if 
(msg.get("type") == "computer_call_output" and - isinstance(msg.get("output"), dict) and - "image_url" in msg.get("output", {})): - call_id = msg.get("call_id") - if call_id and call_id not in image_call_ids: - image_call_ids.append(call_id) - if len(image_call_ids) >= self.only_n_most_recent_images: - break - - # Keep the most recent N image call_ids (reverse to get chronological order) - keep_call_ids = set(image_call_ids[:self.only_n_most_recent_images]) - - # Filter messages: remove computer_call, computer_call_output, and reasoning for old images - filtered_messages = [] - for msg in messages_with_call_ids: - msg_type = msg.get("type") - call_id = msg.get("call_id") - - # Remove old computer_call items - if msg_type == "computer_call" and call_id not in keep_call_ids: - # Check if this call_id corresponds to an image call - has_image_output = any( - m.get("type") == "computer_call_output" and - m.get("call_id") == call_id and - isinstance(m.get("output"), dict) and - "image_url" in m.get("output", {}) - for m in messages_with_call_ids - ) - if has_image_output: - continue # Skip this computer_call - - # Remove old computer_call_output items with images - if (msg_type == "computer_call_output" and - call_id not in keep_call_ids and - isinstance(msg.get("output"), dict) and - "image_url" in msg.get("output", {})): - continue # Skip this computer_call_output - - # Remove old reasoning items that are paired with removed computer calls - if (msg_type == "reasoning" and - call_id and call_id not in keep_call_ids): - # Check if this call_id corresponds to an image call that's being removed - has_image_output = any( - m.get("type") == "computer_call_output" and - m.get("call_id") == call_id and - isinstance(m.get("output"), dict) and - "image_url" in m.get("output", {}) - for m in messages_with_call_ids - ) - if has_image_output: - continue # Skip this reasoning item - - filtered_messages.append(msg) - - # Clean up: Remove call_id from reasoning items before returning 
- final_messages = [] - for msg in filtered_messages: - if msg.get("type") == "reasoning" and "call_id" in msg: - # Create a copy without call_id for reasoning items - cleaned_msg = {k: v for k, v in msg.items() if k != "call_id"} - final_messages.append(cleaned_msg) - else: - final_messages.append(msg) - - return final_messages \ No newline at end of file + + # Gather indices of all computer_call_output messages that contain an image_url + output_indices: List[int] = [] + for idx, msg in enumerate(messages): + if msg.get("type") == "computer_call_output": + out = msg.get("output") + if isinstance(out, dict) and ("image_url" in out): + output_indices.append(idx) + + # Nothing to trim + if len(output_indices) <= self.only_n_most_recent_images: + return messages + + # Determine which outputs to keep (most recent N) + keep_output_indices = set(output_indices[-self.only_n_most_recent_images :]) + + # Build set of indices to remove in one pass + to_remove: set[int] = set() + + for idx in output_indices: + if idx in keep_output_indices: + continue # keep this screenshot and its context + + to_remove.add(idx) # remove the computer_call_output itself + + # Remove the immediately preceding computer_call with matching call_id (if present) + call_id = messages[idx].get("call_id") + prev_idx = idx - 1 + if prev_idx >= 0 and messages[prev_idx].get("type") == "computer_call" and messages[prev_idx].get("call_id") == call_id: + to_remove.add(prev_idx) + # Check a single reasoning immediately before that computer_call + r_idx = prev_idx - 1 + if r_idx >= 0 and messages[r_idx].get("type") == "reasoning": + to_remove.add(r_idx) + + # Construct filtered list + filtered = [m for i, m in enumerate(messages) if i not in to_remove] + return filtered \ No newline at end of file diff --git a/libs/python/agent/agent/callbacks/operator_validator.py b/libs/python/agent/agent/callbacks/operator_validator.py new file mode 100644 index 00000000..56a9c280 --- /dev/null +++ 
b/libs/python/agent/agent/callbacks/operator_validator.py @@ -0,0 +1,138 @@ +""" +OperatorValidatorCallback + +Ensures agent output actions conform to expected schemas by fixing common issues: +- click: add default button='left' if missing +- keypress: wrap keys string into a list +- etc. + +This runs in on_llm_end, which receives the output array (AgentMessage[] as dicts). +The purpose is to avoid spending another LLM call to fix broken computer call syntax when possible. +""" +from __future__ import annotations + +from typing import Any, Dict, List + +from .base import AsyncCallbackHandler + + +class OperatorNormalizerCallback(AsyncCallbackHandler): + """Normalizes common computer call hallucinations / errors in computer call syntax.""" + + async def on_llm_end(self, output: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # Mutate in-place as requested, but still return the list for chaining + for item in output or []: + if item.get("type") != "computer_call": + continue + action = item.get("action") + if not isinstance(action, dict): + continue + + # rename mouse click actions to "click" + for mouse_btn in ["left", "right", "wheel", "back", "forward"]: + if action.get("type", "") == f"{mouse_btn}_click": + action["type"] = "click" + action["button"] = mouse_btn + # rename hotkey actions to "keypress" + for alias in ["hotkey", "key", "press", "key_press"]: + if action.get("type", "") == alias: + action["type"] = "keypress" + # assume click actions + if "button" in action and "type" not in action: + action["type"] = "click" + if "click" in action and "type" not in action: + action["type"] = "click" + if ("scroll_x" in action or "scroll_y" in action) and "type" not in action: + action["type"] = "scroll" + if "text" in action and "type" not in action: + action["type"] = "type" + + action_type = action.get("type") + def _keep_keys(action: Dict[str, Any], keys_to_keep: List[str]): + """Keep only the provided keys on action; delete everything else. 
+ Always ensures required 'type' is present if listed in keys_to_keep. + """ + for key in list(action.keys()): + if key not in keys_to_keep: + del action[key] + # rename "coordinate" to "x", "y" + if "coordinate" in action: + action["x"] = action["coordinate"][0] + action["y"] = action["coordinate"][1] + del action["coordinate"] + if action_type == "click": + # convert "click" to "button" + if "button" not in action and "click" in action: + action["button"] = action["click"] + del action["click"] + # default button to "left" + action["button"] = action.get("button", "left") + # add default scroll x, y if missing + if action_type == "scroll": + action["scroll_x"] = action.get("scroll_x", 0) + action["scroll_y"] = action.get("scroll_y", 0) + # ensure keys arg is a list (normalize aliases first) + if action_type == "keypress": + keys = action.get("keys") + for keys_alias in ["keypress", "key", "press", "key_press", "text"]: + if keys_alias in action: + action["keys"] = action[keys_alias] + del action[keys_alias] + keys = action.get("keys") + if isinstance(keys, str): + action["keys"] = keys.replace("-", "+").split("+") if len(keys) > 1 else [keys] + required_keys_by_type = { + # OpenAI actions + "click": ["type", "button", "x", "y"], + "double_click": ["type", "x", "y"], + "drag": ["type", "path"], + "keypress": ["type", "keys"], + "move": ["type", "x", "y"], + "screenshot": ["type"], + "scroll": ["type", "scroll_x", "scroll_y", "x", "y"], + "type": ["type", "text"], + "wait": ["type"], + # Anthropic actions + "left_mouse_down": ["type", "x", "y"], + "left_mouse_up": ["type", "x", "y"], + "triple_click": ["type", "button", "x", "y"], + } + keep = required_keys_by_type.get(action_type or "") + if keep: + _keep_keys(action, keep) + + + # # Second pass: if an assistant message is immediately followed by a computer_call, + # # replace the assistant message itself with a reasoning message with summary text. 
+ # if isinstance(output, list): + # for i, item in enumerate(output): + # # AssistantMessage shape: { type: 'message', role: 'assistant', content: OutputContent[] } + # if item.get("type") == "message" and item.get("role") == "assistant": + # next_idx = i + 1 + # if next_idx >= len(output): + # continue + # next_item = output[next_idx] + # if not isinstance(next_item, dict): + # continue + # if next_item.get("type") != "computer_call": + # continue + # contents = item.get("content") or [] + # # Extract text from OutputContent[] + # text_parts: List[str] = [] + # if isinstance(contents, list): + # for c in contents: + # if isinstance(c, dict) and c.get("type") == "output_text" and isinstance(c.get("text"), str): + # text_parts.append(c["text"]) + # text_content = "\n".join(text_parts).strip() + # # Replace assistant message with reasoning message + # output[i] = { + # "type": "reasoning", + # "summary": [ + # { + # "type": "summary_text", + # "text": text_content, + # } + # ], + # } + + return output diff --git a/libs/python/agent/agent/callbacks/trajectory_saver.py b/libs/python/agent/agent/callbacks/trajectory_saver.py index 805b535d..a65722aa 100644 --- a/libs/python/agent/agent/callbacks/trajectory_saver.py +++ b/libs/python/agent/agent/callbacks/trajectory_saver.py @@ -11,6 +11,8 @@ from pathlib import Path from typing import List, Dict, Any, Optional, Union, override from PIL import Image, ImageDraw import io +from copy import deepcopy + from .base import AsyncCallbackHandler def sanitize_image_urls(data: Any) -> Any: @@ -43,6 +45,64 @@ def sanitize_image_urls(data: Any) -> Any: return data +def extract_computer_call_outputs(items: List[Dict[str, Any]], screenshot_dir: Optional[Path]) -> List[Dict[str, Any]]: + """ + Save any base64-encoded screenshots from computer_call_output entries to files and + replace their image_url with the saved file path when a call_id is present. 
+ + Only operates if screenshot_dir is provided and exists; otherwise returns items unchanged. + + Args: + items: List of message/result dicts potentially containing computer_call_output entries + screenshot_dir: Directory to write screenshots into + + Returns: + A new list with updated image_url fields when applicable. + """ + if not items: + return items + if not screenshot_dir or not screenshot_dir.exists(): + return items + + updated: List[Dict[str, Any]] = [] + for item in items: + # work on a shallow copy; deep copy nested 'output' if we modify it + msg = dict(item) + try: + if msg.get("type") == "computer_call_output": + call_id = msg.get("call_id") + output = msg.get("output", {}) + image_url = output.get("image_url") + if call_id and isinstance(image_url, str) and image_url.startswith("data:"): + # derive extension from MIME type e.g. data:image/png;base64, + try: + ext = image_url.split(";", 1)[0].split("/")[-1] + if not ext: + ext = "png" + except Exception: + ext = "png" + out_path = screenshot_dir / f"{call_id}.{ext}" + # write file if it doesn't exist + if not out_path.exists(): + try: + b64_payload = image_url.split(",", 1)[1] + img_bytes = base64.b64decode(b64_payload) + out_path.parent.mkdir(parents=True, exist_ok=True) + with open(out_path, "wb") as f: + f.write(img_bytes) + except Exception: + # if anything fails, skip modifying this message + pass + # update image_url to file path + new_output = dict(output) + new_output["image_url"] = str(out_path) + msg["output"] = new_output + except Exception: + # do not block on malformed entries; keep original + pass + updated.append(msg) + return updated + class TrajectorySaverCallback(AsyncCallbackHandler): """ Callback handler that saves agent trajectories to disk. @@ -51,7 +111,7 @@ class TrajectorySaverCallback(AsyncCallbackHandler): within the trajectory gets its own folder with screenshots and responses. 
""" - def __init__(self, trajectory_dir: str, reset_on_run: bool = True): + def __init__(self, trajectory_dir: str, reset_on_run: bool = True, screenshot_dir: Optional[str] = None): """ Initialize trajectory saver. @@ -67,10 +127,12 @@ class TrajectorySaverCallback(AsyncCallbackHandler): self.model: Optional[str] = None self.total_usage: Dict[str, Any] = {} self.reset_on_run = reset_on_run + # Optional directory to store extracted screenshots from metadata/new_items + self.screenshot_dir: Optional[Path] = Path(screenshot_dir) if screenshot_dir else None # Ensure trajectory directory exists self.trajectory_dir.mkdir(parents=True, exist_ok=True) - + def _get_turn_dir(self) -> Path: """Get the directory for the current turn.""" if not self.trajectory_id: @@ -94,6 +156,10 @@ class TrajectorySaverCallback(AsyncCallbackHandler): # format: turn_000/0000_name.json artifact_filename = f"{self.current_artifact:04d}_{name}" artifact_path = turn_dir / f"{artifact_filename}.json" + # add created_at + if isinstance(artifact, dict): + artifact = artifact.copy() + artifact["created_at"] = str(uuid.uuid1().time) with open(artifact_path, "w") as f: json.dump(sanitize_image_urls(artifact), f, indent=2) self.current_artifact += 1 @@ -135,12 +201,21 @@ class TrajectorySaverCallback(AsyncCallbackHandler): trajectory_path = self.trajectory_dir / self.trajectory_id trajectory_path.mkdir(parents=True, exist_ok=True) - # Save trajectory metadata + # Save trajectory metadata (optionally extract screenshots to screenshot_dir) + kwargs_to_save = kwargs.copy() + try: + if "messages" in kwargs_to_save: + kwargs_to_save["messages"] = extract_computer_call_outputs( + kwargs_to_save["messages"], self.screenshot_dir + ) + except Exception: + # If extraction fails, fall back to original messages + pass metadata = { "trajectory_id": self.trajectory_id, "created_at": str(uuid.uuid1().time), "status": "running", - "kwargs": kwargs, + "kwargs": kwargs_to_save, } with open(trajectory_path / 
"metadata.json", "w") as f: @@ -167,11 +242,18 @@ class TrajectorySaverCallback(AsyncCallbackHandler): metadata = {} # Update metadata with completion info + # Optionally extract screenshots from new_items before persisting + new_items_to_save = new_items + try: + new_items_to_save = extract_computer_call_outputs(new_items, self.screenshot_dir) + except Exception: + pass + metadata.update({ "status": "completed", "completed_at": str(uuid.uuid1().time), "total_usage": self.total_usage, - "new_items": sanitize_image_urls(new_items), + "new_items": new_items_to_save, "total_turns": self.current_turn }) diff --git a/libs/python/agent/agent/human_tool/ui.py b/libs/python/agent/agent/human_tool/ui.py index f4a9fb4f..c26a23a8 100644 --- a/libs/python/agent/agent/human_tool/ui.py +++ b/libs/python/agent/agent/human_tool/ui.py @@ -196,7 +196,9 @@ class HumanCompletionUI: gr.update(choices=["latest"], value="latest"), # dropdown gr.update(value=None), # image (no image) gr.update(value=[]), # chatbot (empty messages) - gr.update(interactive=False) # submit button + gr.update(interactive=False), # submit button + gr.update(visible=False), # click_actions_group hidden + gr.update(visible=False), # actions_group hidden ) # Sort pending calls by created_at to get oldest first @@ -237,7 +239,9 @@ class HumanCompletionUI: gr.update(choices=choices, value="latest"), gr.update(value=self.last_image), gr.update(value=conversation), - gr.update(interactive=bool(choices)) + gr.update(interactive=bool(choices)), + gr.update(visible=True), # click_actions_group visible when there is a call + gr.update(visible=True), # actions_group visible when there is a call ) def on_call_selected(self, selected_choice): @@ -246,7 +250,9 @@ class HumanCompletionUI: return ( gr.update(value=None), # no image gr.update(value=[]), # empty chatbot - gr.update(interactive=False) + gr.update(interactive=False), + gr.update(visible=False), # click_actions_group hidden + gr.update(visible=False), # 
actions_group hidden ) pending_calls = self.get_pending_calls() @@ -254,7 +260,9 @@ class HumanCompletionUI: return ( gr.update(value=None), # no image gr.update(value=[]), # empty chatbot - gr.update(interactive=False) + gr.update(interactive=False), + gr.update(visible=False), # click_actions_group hidden + gr.update(visible=False), # actions_group hidden ) # Handle "latest" option @@ -286,7 +294,9 @@ class HumanCompletionUI: return ( gr.update(value=None), # no image gr.update(value=[]), # empty chatbot - gr.update(interactive=False) + gr.update(interactive=False), + gr.update(visible=False), # click_actions_group hidden + gr.update(visible=False), # actions_group hidden ) conversation = self.format_messages_for_chatbot(selected_call.get("messages", [])) @@ -297,7 +307,9 @@ class HumanCompletionUI: return ( gr.update(value=self.last_image), gr.update(value=conversation), - gr.update(interactive=True) + gr.update(interactive=True), + gr.update(visible=True), # click_actions_group visible + gr.update(visible=True), # actions_group visible ) def submit_response(self, response_text: str): @@ -368,6 +380,10 @@ class HumanCompletionUI: """Submit a hotkey action.""" return self.submit_action("keypress", keys=keys) + def submit_wait_action(self) -> str: + """Submit a wait action with no kwargs.""" + return self.submit_action("wait") + def submit_description_click(self, description: str, action_type: str = "click", button: str = "left") -> str: """Submit a description-based action.""" if action_type == "click": @@ -407,7 +423,7 @@ def create_ui(): """Create the Gradio interface.""" ui_handler = HumanCompletionUI() - with gr.Blocks(title="Human-in-the-Loop Agent Tool") as demo: + with gr.Blocks(title="Human-in-the-Loop Agent Tool", fill_width=True) as demo: gr.Markdown("# πŸ€– Human-in-the-Loop Agent Tool") gr.Markdown("Review AI conversation requests and provide human responses.") @@ -415,29 +431,30 @@ def create_ui(): with gr.Column(scale=2): with gr.Group(): 
screenshot_image = gr.Image( - label="Screenshot", + label="Interactive Screenshot", interactive=False, height=600 ) - # Action type selection for image clicks - with gr.Row(): - action_type_radio = gr.Radio( - label="Action Type", - choices=["click", "double_click", "move", "left_mouse_up", "left_mouse_down"], - value="click", - scale=2 - ) - action_button_radio = gr.Radio( - label="Button (for click only)", - choices=["left", "right", "wheel", "back", "forward"], - value="left", - visible=True, - scale=1 - ) + # Action type selection for image clicks (wrapped for visibility control) + with gr.Group(visible=False) as click_actions_group: + with gr.Row(): + action_type_radio = gr.Dropdown( + label="Action", + choices=["click", "double_click", "move", "left_mouse_up", "left_mouse_down"], + value="click", + scale=2 + ) + action_button_radio = gr.Dropdown( + label="Button", + choices=["left", "right", "wheel", "back", "forward"], + value="left", + visible=True, + scale=1 + ) conversation_chatbot = gr.Chatbot( - label="Messages", + label="Conversation", type="messages", height=500, show_copy_button=True @@ -446,91 +463,83 @@ def create_ui(): with gr.Column(scale=1): with gr.Group(): call_dropdown = gr.Dropdown( - label="Select a pending call", + label="Select a pending conversation request", choices=["latest"], interactive=True, value="latest" ) refresh_btn = gr.Button("πŸ”„ Refresh", variant="secondary") + status_display = gr.Textbox( + label="Status", + interactive=False, + value="Ready to receive requests..." + ) with gr.Group(): response_text = gr.Textbox( - label="Response", + label="Message", lines=3, - placeholder="Enter your response here..." + placeholder="Enter your message here..." 
) - submit_btn = gr.Button("πŸ“€ Submit Response", variant="primary", interactive=False) + submit_btn = gr.Button("πŸ“€ Submit Message", variant="primary", interactive=False) - # Action Accordions - with gr.Accordion("πŸ–±οΈ Click Actions", open=False): - with gr.Group(): - with gr.Row(): - click_x = gr.Number(label="X", value=0, minimum=0) - click_y = gr.Number(label="Y", value=0, minimum=0) - with gr.Row(): - click_action_type = gr.Dropdown( - label="Action Type", - choices=["click", "double_click", "move", "left_mouse_up", "left_mouse_down"], - value="click" - ) - click_button = gr.Dropdown( - label="Button (for click only)", - choices=["left", "right", "wheel", "back", "forward"], - value="left" - ) - click_submit_btn = gr.Button("Submit Action") - - with gr.Accordion("πŸ“ Type Action", open=False): - with gr.Group(): - type_text = gr.Textbox( - label="Text to Type", - placeholder="Enter text to type..." - ) - type_submit_btn = gr.Button("Submit Type") - - with gr.Accordion("⌨️ Keypress Action", open=False): - with gr.Group(): - keypress_text = gr.Textbox( - label="Keys", - placeholder="e.g., ctrl+c, alt+tab" - ) - keypress_submit_btn = gr.Button("Submit Keypress") - - with gr.Accordion("🎯 Description Action", open=False): - with gr.Group(): - description_text = gr.Textbox( - label="Element Description", - placeholder="e.g., 'Privacy and security option in left sidebar'" - ) - with gr.Row(): - description_action_type = gr.Dropdown( - label="Action Type", - choices=["click", "double_click", "move", "left_mouse_up", "left_mouse_down"], - value="click" - ) - description_button = gr.Radio( - label="Button (for click only)", - choices=["left", "right", "wheel", "back", "forward"], - value="left" - ) - description_submit_btn = gr.Button("Submit Description Action") - - status_display = gr.Textbox( - label="Status", - interactive=False, - value="Ready to receive calls..." 
- ) + # Action Accordions (wrapped for visibility control) + with gr.Group(visible=False) as actions_group: + with gr.Tabs(): + with gr.Tab("πŸ–±οΈ Click Actions"): + with gr.Group(): + description_text = gr.Textbox( + label="Element Description", + placeholder="e.g., 'Privacy and security option in left sidebar'" + ) + with gr.Row(): + description_action_type = gr.Dropdown( + label="Action", + choices=["click", "double_click", "move", "left_mouse_up", "left_mouse_down"], + value="click" + ) + description_button = gr.Dropdown( + label="Button", + choices=["left", "right", "wheel", "back", "forward"], + value="left" + ) + description_submit_btn = gr.Button("Submit Click Action") + + with gr.Tab("πŸ“ Type Action"): + with gr.Group(): + type_text = gr.Textbox( + label="Text to Type", + placeholder="Enter text to type..." + ) + type_submit_btn = gr.Button("Submit Type") + + with gr.Tab("⌨️ Keypress Action"): + with gr.Group(): + keypress_text = gr.Textbox( + label="Keys", + placeholder="e.g., ctrl+c, alt+tab" + ) + keypress_submit_btn = gr.Button("Submit Keypress") + + with gr.Tab("🧰 Misc Actions"): + with gr.Group(): + misc_action_dropdown = gr.Dropdown( + label="Action", + choices=["wait"], + value="wait" + ) + misc_submit_btn = gr.Button("Submit Action") # Event handlers refresh_btn.click( fn=ui_handler.refresh_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) call_dropdown.change( fn=ui_handler.on_call_selected, inputs=[call_dropdown], - outputs=[screenshot_image, conversation_chatbot, submit_btn] + outputs=[screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) def handle_image_click(evt: gr.SelectData): @@ -548,7 +557,7 @@ def create_ui(): outputs=[status_display] ).then( fn=ui_handler.wait_for_pending_calls, - outputs=[call_dropdown, screenshot_image, 
conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) # Response submission @@ -558,7 +567,7 @@ def create_ui(): outputs=[response_text, status_display] ).then( fn=ui_handler.refresh_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) # Toggle button radio visibility based on action type @@ -570,16 +579,6 @@ def create_ui(): inputs=[action_type_radio], outputs=[action_button_radio] ) - - # Action accordion handlers - click_submit_btn.click( - fn=ui_handler.submit_click_action, - inputs=[click_x, click_y, click_action_type, click_button], - outputs=[status_display] - ).then( - fn=ui_handler.wait_for_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] - ) type_submit_btn.click( fn=ui_handler.submit_type_action, @@ -587,7 +586,7 @@ def create_ui(): outputs=[status_display] ).then( fn=ui_handler.wait_for_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) keypress_submit_btn.click( @@ -596,7 +595,7 @@ def create_ui(): outputs=[status_display] ).then( fn=ui_handler.wait_for_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) def handle_description_submit(description, action_type, button): @@ -612,13 +611,30 @@ def create_ui(): outputs=[status_display] ).then( fn=ui_handler.wait_for_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, 
actions_group] + ) + + # Misc action handler + def handle_misc_submit(selected_action): + if selected_action == "wait": + result = ui_handler.submit_wait_action() + ui_handler.wait_for_pending_calls() + return result + return f"Unsupported misc action: {selected_action}" + + misc_submit_btn.click( + fn=handle_misc_submit, + inputs=[misc_action_dropdown], + outputs=[status_display] + ).then( + fn=ui_handler.wait_for_pending_calls, + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) # Load initial data demo.load( fn=ui_handler.refresh_pending_calls, - outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn] + outputs=[call_dropdown, screenshot_image, conversation_chatbot, submit_btn, click_actions_group, actions_group] ) return demo diff --git a/libs/python/agent/agent/integrations/hud/__init__.py b/libs/python/agent/agent/integrations/hud/__init__.py index 787613de..0da87bfa 100644 --- a/libs/python/agent/agent/integrations/hud/__init__.py +++ b/libs/python/agent/agent/integrations/hud/__init__.py @@ -1,77 +1,228 @@ -"""HUD integration for ComputerAgent.""" +"""HUD integration: Generic HuggingFace dataset evaluation runner (CUA proxy). -import logging -from typing import Any, Optional, Dict -from hud import run_job as hud_run_job +This module exposes two helpers to evaluate HUD-compatible datasets using +HUD's OperatorAgent, while proxying model calls through our ComputerAgent via +`FakeAsyncOpenAI` (see `agent/integrations/hud/agent.py`). 
-from .agent import ComputerAgent -from .adapter import ComputerAgentAdapter -from .computer_handler import HUDComputerHandler +Exports: +- run_single_task(dataset_name, *, agent_type="cua-proxy", model=None, allowed_tools=None) +- run_full_dataset(dataset_name, *, agent_type="cua-proxy", model=None, allowed_tools=None, max_concurrent=30, max_steps=50) +""" +import time +from typing import Any, Optional + +from PIL import Image +from datasets import load_dataset, Dataset +from hud.agents import OperatorAgent +from hud.datasets import Task, run_dataset +from hud.tools.computer.settings import computer_settings +from hud import trace + +from agent.agent import ComputerAgent as BaseComputerAgent +from .proxy import FakeAsyncOpenAI -async def run_job( - model: str, - task_or_taskset: Any, - job_name: str, - # Job kwargs - auto_reply_question: bool = False, - adapter_cls: Any = None, - adapter_kwargs: Optional[Dict[str, Any]] = None, - max_steps_per_task: int = 20, - run_parallel: bool = True, - job_metadata: Optional[Dict[str, Any]] = None, - show_progress: bool = True, - max_concurrent_env_creations: Optional[int] = 30, # Limits gym.make calls - max_concurrent_agent_predictions: Optional[int] = None, # No limit on LLM calls - max_concurrent_tasks: Optional[int] = 30, # Limits overall task concurrency - **agent_kwargs: Any -) -> Any: +# --------------------------------------------------------------------------- +# Proxy OperatorAgent +# --------------------------------------------------------------------------- + + +class ProxyOperatorAgent(OperatorAgent): + """OperatorAgent that proxies model calls through our ComputerAgent. + + Accepts the same config keys we pass via hud.run_dataset `agent_config`: + - model: str | None + - allowed_tools: list[str] | None + Additional kwargs are forwarded to OperatorAgent (if any are supported). """ - Run a job using ComputerAgent with the specified model. 
+ + def __init__( + self, + *, + model: str | None = None, + allowed_tools: list[str] | None = None, + trajectory_dir: str | dict | None = None, + # === ComputerAgent kwargs === + tools: list[Any] | None = None, + custom_loop: Any | None = None, + only_n_most_recent_images: int | None = None, + callbacks: list[Any] | None = None, + verbosity: int | None = None, + max_retries: int | None = 3, + screenshot_delay: float | int = 0.5, + use_prompt_caching: bool | None = False, + max_trajectory_budget: float | dict | None = None, + telemetry_enabled: bool | None = True, + **kwargs: Any, + ) -> None: + model = model or "computer-use-preview" + allowed_tools = allowed_tools or ["openai_computer"] + + computer_shim = { + 'screenshot': lambda: Image.new('RGB', (computer_settings.OPENAI_COMPUTER_WIDTH, computer_settings.OPENAI_COMPUTER_HEIGHT)), + 'environment': 'linux', + 'dimensions': (computer_settings.OPENAI_COMPUTER_WIDTH, computer_settings.OPENAI_COMPUTER_HEIGHT) + } + # Build tools ensuring the computer_shim is included + agent_tools: list[Any] = [computer_shim] + if tools: + agent_tools.extend(tools) + + computer_agent = BaseComputerAgent( + model=model, + tools=agent_tools, + custom_loop=custom_loop, + only_n_most_recent_images=only_n_most_recent_images, + callbacks=callbacks, + verbosity=verbosity, + trajectory_dir=trajectory_dir, + max_retries=max_retries, + screenshot_delay=screenshot_delay, + use_prompt_caching=use_prompt_caching, + max_trajectory_budget=max_trajectory_budget, + telemetry_enabled=telemetry_enabled, + ) + model_client = FakeAsyncOpenAI(computer_agent) + + super().__init__( + model_client=model_client, # type: ignore[arg-type] + model=model, + allowed_tools=allowed_tools, + **kwargs, + ) + + +# --------------------------------------------------------------------------- +# Single-task runner +# --------------------------------------------------------------------------- + + +async def run_single_task( + dataset: str | Dataset | list[dict[str, Any]], 
+ *, + task_id: int = 0, + model: str | None = None, + allowed_tools: list[str] | None = None, + # === ComputerAgent kwargs === + tools: list[Any] | None = None, + custom_loop: Any | None = None, + only_n_most_recent_images: int | None = None, + callbacks: list[Any] | None = None, + verbosity: int | None = None, + trajectory_dir: str | dict | None = None, + max_retries: int | None = 3, + screenshot_delay: float | int = 0.5, + use_prompt_caching: bool | None = False, + max_trajectory_budget: float | dict | None = None, + telemetry_enabled: bool | None = True, +) -> None: + """Load one task from the dataset and execute it with Operator+CUA proxy.""" + + # Load dataset and pick a sample + if isinstance(dataset, str): + dataset = load_dataset(dataset, split="train") # type: ignore[arg-type] + elif isinstance(dataset, list): + dataset = dataset + else: + dataset = dataset["train"] - Args: - model: Model string for ComputerAgent (e.g., "anthropic/claude-3-5-sonnet-20241022") - task_or_taskset: Task or TaskSet to run - job_name: Name for the job - auto_reply_question: Whether to auto-reply to questions - adapter_cls: Custom adapter class (defaults to ComputerAgentAdapter) - adapter_kwargs: Additional kwargs for the adapter - max_steps_per_task: Maximum steps per task - run_parallel: Whether to run tasks in parallel - job_metadata: Additional metadata for the job - show_progress: Whether to show progress - max_concurrent_env_creations: Max concurrent environment creations - max_concurrent_agent_predictions: Max concurrent agent predictions - max_concurrent_tasks: Max concurrent tasks - **agent_kwargs: Additional kwargs to pass to ComputerAgent - - Returns: - Job instance from HUD - """ - # combine verbose and verbosity kwargs - if "verbose" in agent_kwargs: - agent_kwargs["verbosity"] = logging.INFO - del agent_kwargs["verbose"] - verbose = True if agent_kwargs.get("verbosity", logging.WARNING) > logging.INFO else False - - # run job - return await hud_run_job( - 
agent_cls=ComputerAgent, - agent_kwargs={"model": model, **agent_kwargs}, - task_or_taskset=task_or_taskset, - job_name=job_name, - auto_reply_question=auto_reply_question, - adapter_cls=adapter_cls, - adapter_kwargs=adapter_kwargs, - max_steps_per_task=max_steps_per_task, - run_parallel=run_parallel, - job_metadata=job_metadata, - show_progress=show_progress, - verbose=verbose, - max_concurrent_env_creations=max_concurrent_env_creations, - max_concurrent_agent_predictions=max_concurrent_agent_predictions, - max_concurrent_tasks=max_concurrent_tasks + sample_task = dataset[task_id] # type: ignore[index] + task_prompt = sample_task.get("prompt", f"Task {sample_task.get('id', 0)}") # type: ignore[attr-defined] + + with trace(name=task_prompt): + task = Task(**sample_task) # type: ignore[arg-type] + + agent = ProxyOperatorAgent( + model=model, + allowed_tools=allowed_tools, + # === ComputerAgent kwargs passthrough === + tools=tools, + custom_loop=custom_loop, + only_n_most_recent_images=only_n_most_recent_images, + callbacks=callbacks, + verbosity=verbosity, + trajectory_dir=trajectory_dir, + max_retries=max_retries, + screenshot_delay=screenshot_delay, + use_prompt_caching=use_prompt_caching, + max_trajectory_budget=max_trajectory_budget, + telemetry_enabled=telemetry_enabled, + ) + print(f"Running: {task_prompt}") + result = await agent.run(task, max_steps=10) + print(f"βœ… Reward: {getattr(result, 'reward')}") + + +# --------------------------------------------------------------------------- +# Full-dataset runner +# --------------------------------------------------------------------------- + + +async def run_full_dataset( + dataset: str | Dataset | list[dict[str, Any]], + *, + job_name: Optional[str] = None, + model: str | None = None, + allowed_tools: list[str] | None = None, + max_concurrent: int = 30, + max_steps: int = 50, + split: str = "train", + trajectory_dir: str | dict | None = None, + # === ComputerAgent kwargs === + tools: list[Any] | None = None, + 
custom_loop: Any | None = None, + only_n_most_recent_images: int | None = 5, + callbacks: list[Any] | None = None, + verbosity: int | None = None, + max_retries: int | None = 3, + screenshot_delay: float | int = 0.5, + use_prompt_caching: bool | None = False, + max_trajectory_budget: float | dict | None = None, + telemetry_enabled: bool | None = True, +) -> list[Any]: + """Run evaluation across the entire dataset using hud.datasets.run_dataset.""" + + # We pass OperatorAgent as the class and provide a config that injects our + # FakeAsyncOpenAI per agent instantiation. + + if isinstance(dataset, str): + dataset_name = dataset.split('/')[-1] + job_name = job_name or f"Evaluation {dataset_name}" + dataset = load_dataset(dataset, split=split) # type: ignore[arg-type] + else: + dataset_name = "custom" + job_name = job_name or f"Evaluation {time.strftime('%H:%M %Y-%m-%d')}" + + # Execute evaluation + return await run_dataset( + name=job_name, + dataset=dataset, + agent_class=ProxyOperatorAgent, + agent_config={ + "model": model, + "allowed_tools": allowed_tools, + "trajectory_dir": trajectory_dir, + # === ComputerAgent kwargs passthrough === + "tools": tools, + "custom_loop": custom_loop, + "only_n_most_recent_images": only_n_most_recent_images, + "callbacks": callbacks, + "verbosity": verbosity, + "max_retries": max_retries, + "screenshot_delay": screenshot_delay, + "use_prompt_caching": use_prompt_caching, + "max_trajectory_budget": max_trajectory_budget, + "telemetry_enabled": telemetry_enabled, + }, + max_concurrent=max_concurrent, + metadata={"dataset": dataset_name}, + max_steps=max_steps, + auto_respond=True, ) -__all__ = ["ComputerAgent", "ComputerAgentAdapter", "HUDComputerHandler", "run_job"] \ No newline at end of file +__all__ = [ + "run_single_task", + "run_full_dataset", + "ProxyOperatorAgent", +] \ No newline at end of file diff --git a/libs/python/agent/agent/integrations/hud/adapter.py b/libs/python/agent/agent/integrations/hud/adapter.py deleted file 
mode 100644 index 77c8dc7d..00000000 --- a/libs/python/agent/agent/integrations/hud/adapter.py +++ /dev/null @@ -1,121 +0,0 @@ -"""HUD Adapter for ComputerAgent integration.""" - -from __future__ import annotations - -from typing import Any, ClassVar - -from hud.adapters.common import CLA, Adapter -from hud.adapters.common.types import ( - CLAButton, - CLAKey, - ClickAction, - CustomAction, - DragAction, - MoveAction, - Point, - PressAction, - ResponseAction, - ScreenshotFetch, - ScrollAction, - TypeAction, - WaitAction, -) - - -class ComputerAgentAdapter(Adapter): - """Adapter for ComputerAgent to work with HUD.""" - - KEY_MAP: ClassVar[dict[str, CLAKey]] = { - "return": "enter", - "arrowup": "up", - "arrowdown": "down", - "arrowleft": "left", - "arrowright": "right", - "cmd": "ctrl", - "super": "win", - "meta": "win", - } - - BUTTON_MAP: ClassVar[dict[str, CLAButton]] = { - "wheel": "middle", - "middle": "middle", - } - - def __init__(self) -> None: - super().__init__() - # ComputerAgent default dimensions (can be overridden) - self.agent_width = 1024 - self.agent_height = 768 - - def _map_key(self, key: str) -> CLAKey: - """Map a key to its standardized form.""" - return self.KEY_MAP.get(key.lower(), key.lower()) # type: ignore - - def convert(self, data: Any) -> CLA: - """Convert a ComputerAgent action to a HUD action.""" - try: - action_type = data.get("type") - - if action_type == "click": - x, y = data.get("x", 0), data.get("y", 0) - button = data.get("button", "left") - button = self.BUTTON_MAP.get(button, button) - if button is None: - button = "left" - converted_action = ClickAction(point=Point(x=x, y=y), button=button) - - elif action_type == "double_click": - x, y = data.get("x", 0), data.get("y", 0) - converted_action = ClickAction(point=Point(x=x, y=y), button="left", pattern=[100]) - - elif action_type == "scroll": - x, y = int(data.get("x", 0)), int(data.get("y", 0)) - scroll_x = int(data.get("scroll_x", 0)) - scroll_y = int(data.get("scroll_y", 0)) 
- converted_action = ScrollAction( - point=Point(x=x, y=y), scroll=Point(x=scroll_x, y=scroll_y) - ) - - elif action_type == "type": - text = data.get("text", "") - converted_action = TypeAction(text=text, enter_after=False) - - elif action_type == "wait": - ms = data.get("ms", 1000) - converted_action = WaitAction(time=ms) - - elif action_type == "move": - x, y = data.get("x", 0), data.get("y", 0) - converted_action = MoveAction(point=Point(x=x, y=y)) - - elif action_type == "keypress": - keys = data.get("keys", []) - if isinstance(keys, str): - keys = [keys] - converted_action = PressAction(keys=[self._map_key(k) for k in keys]) - - elif action_type == "drag": - path = data.get("path", []) - points = [Point(x=p.get("x", 0), y=p.get("y", 0)) for p in path] - converted_action = DragAction(path=points) - - elif action_type == "screenshot": - converted_action = ScreenshotFetch() - - elif action_type == "response": - converted_action = ResponseAction(text=data.get("text", "")) - - elif action_type == "custom": - converted_action = CustomAction(action=data.get("action", "")) - - else: - raise ValueError(f"Unsupported action type: {action_type}") - - # Add reasoning and logs if available - converted_action.reasoning = data.get("reasoning", "") - converted_action.logs = data.get("logs", "") - - return converted_action - - except Exception as e: - raise ValueError(f"Invalid action: {data}. 
Error: {e!s}") from e diff --git a/libs/python/agent/agent/integrations/hud/agent.py b/libs/python/agent/agent/integrations/hud/agent.py deleted file mode 100644 index abbf5f8c..00000000 --- a/libs/python/agent/agent/integrations/hud/agent.py +++ /dev/null @@ -1,373 +0,0 @@ -"""HUD ComputerAgent wrapper for OSWorld benchmarking.""" - -import logging -from typing import Any, Literal, Optional, Union, List, Dict -import asyncio - -from agent import ComputerAgent as BaseComputerAgent -from agent.responses import make_failed_tool_call_items -from hud.adapters import Adapter -from hud.agent.base import Agent -from hud.utils.common import Observation -from hud.adapters.common.types import LogType -from hud.types import Gym - -from .adapter import ComputerAgentAdapter -from .computer_handler import HUDComputerHandler - -logger = logging.getLogger(__name__) - -BASE_SYSTEM_PROMPT = """ -You are an autonomous computer-using agent. Follow these guidelines: - -1. Be decisive and complete tasks without asking for confirmation unless absolutely necessary. -2. Use the computer tools to complete the task and do not stop until the task is complete. -3. Do NOT ask questions like "Should I proceed?" or "Would you like me to continue?" - just proceed with the task. -4. When you find what you're looking for (e.g., a file to upload), proceed with the action directly. -5. Only stop when the task is fully complete or if you encounter an error that prevents completion. -6. Trust that the user wants you to complete the entire task they've requested. -7. You must say "Task completed" when the task is complete. - -Remember: You have been given permission to complete the requested task autonomously. -""".strip() - -class ComputerAgent(Agent[BaseComputerAgent, dict[str, Any]]): - """ - A ComputerAgent wrapper for HUD integration. - - This agent wraps the base ComputerAgent to work with HUD environments, - providing the same interface as OperatorAgent but using ComputerAgent internally. 
- """ - - transfer_gyms: dict[Gym, Gym] = {"qa": "hud-browser"} - - def __init__( - self, - model: str = "anthropic/claude-3-5-sonnet-20241022", - environment: Literal["windows", "mac", "linux", "browser"] = "linux", - adapter: Optional[Adapter] = None, - name: Optional[str] = None, - **kwargs: Any, - ): - """ - Initialize the ComputerAgent for HUD. - - Args: - model: The model string for ComputerAgent (e.g., "anthropic/claude-3-5-sonnet-20241022") - environment: The environment type (windows, mac, linux, browser) - adapter: The adapter to use for preprocessing and postprocessing - name: The name of the agent - **kwargs: Additional arguments passed to ComputerAgent - """ - # Create adapter if not provided - adapter = adapter or ComputerAgentAdapter() - - if name is None: - name = f"computeragent-{model.split('/')[-1]}" - - # Initialize the base Agent class without client (we'll create it later) - super().__init__(client=None, adapter=adapter, name=name) - - self.model = model - self.environment = environment - self.kwargs = kwargs - - # Default dimensions - self.width = 1024 - self.height = 768 - - # Update dimensions if adapter is provided - if self.adapter: - self.width = self.adapter.agent_width - self.height = self.adapter.agent_height - - # Create HUD computer handler - self.hud_computer = HUDComputerHandler( - environment=environment, - dimensions=(self.width, self.height) - ) - - # Handle trajectory_dir by adding TrajectorySaverCallback - trajectory_dir = kwargs.pop("trajectory_dir", None) - callbacks = kwargs.get("callbacks", []) - - if trajectory_dir: - from agent.callbacks.trajectory_saver import TrajectorySaverCallback - trajectory_callback = TrajectorySaverCallback(trajectory_dir, reset_on_run=False) - callbacks = callbacks + [trajectory_callback] - kwargs["callbacks"] = callbacks - - # Initialize ComputerAgent with HUD computer handler - self.computer_agent = BaseComputerAgent( - model=model, - tools=[self.hud_computer], - **kwargs - ) - - # Set the 
client to the computer_agent for compatibility - self.client = self.computer_agent - - # State tracking - self.conversation_history: List[Dict[str, Any]] = [] - self.initial_prompt: Optional[str] = None - - # System prompt for computer use tasks - self.base_system_prompt = BASE_SYSTEM_PROMPT - - async def fetch_response(self, observation: Observation) -> tuple[list[dict[str, Any]], bool]: - """ - Fetch a response from ComputerAgent based on the observation. - - Args: - observation: The preprocessed observation, attributes: - screenshot: Base64 encoded PNG string of the screen - text: Text observation, if available - - Returns: - tuple[list[dict[str, Any]], bool, list[LogType] | None]: A tuple containing the list of raw actions, - boolean indicating if the agent believes the task is complete. - """ - try: - # Update the computer handler with the current screenshot - if observation.screenshot: - self.hud_computer.update_screenshot(observation.screenshot) - - # Set up action callback to capture actions - captured_actions = [] - action_done = False - - async def action_callback(action: Dict[str, Any]) -> None: - """Callback to capture actions from ComputerAgent.""" - nonlocal captured_actions, action_done - captured_actions.append(action) - - # Set the action callback - self.hud_computer.set_action_callback(action_callback) - - # Prepare the message for ComputerAgent - if not self.conversation_history: - # First interaction - use the observation text as initial prompt - if observation.text: - self.initial_prompt = observation.text - message = f"{self.base_system_prompt}\n\nTask: {observation.text}" - else: - message = f"{self.base_system_prompt}\n\nPlease analyze the current screen and determine what action to take." 
- - input_content = [ - {"type": "input_text", "text": message} - ] - - # Add screenshot if present - if observation.screenshot: - input_content.append( - { - "type": "input_image", - "image_url": f"data:image/png;base64,{observation.screenshot}", - } - ) - - self.conversation_history.append({"role": "user", "content": input_content}) - else: - # Subsequent interactions - check if last action was computer_call - # If so, add computer_call_output with screenshot instead of user message - last_computer_calls = [] - for msg in reversed(self.conversation_history): - if msg.get("type") == "computer_call": - call_id = msg.get("call_id") - if call_id: - # Check if this call_id already has a computer_call_output - has_output = any( - m.get("type") == "computer_call_output" and m.get("call_id") == call_id - for m in self.conversation_history - ) - if not has_output: - last_computer_calls.append(call_id) - - if last_computer_calls: - if not observation.screenshot: - print("No screenshot found, taking screenshot") - screenshot_b64 = await self.hud_computer.screenshot() - # Add computer_call_output for each unresponded computer_call - for call_id in reversed(last_computer_calls): # Maintain order - self.conversation_history.append({ - "type": "computer_call_output", - "call_id": call_id, - "output": { - "type": "input_image", - "image_url": f"data:image/png;base64,{screenshot_b64}" - } - }) - else: - # No computer_call found, add regular user message - message = "Continue with the task based on the current screen state." 
- input_content = [ - {"type": "input_text", "text": message} - ] - - # Add screenshot if present - if observation.screenshot: - input_content.append( - { - "type": "input_image", - "image_url": f"data:image/png;base64,{observation.screenshot}", - } - ) - - self.conversation_history.append({"role": "user", "content": input_content}) - - # If the last message is a reasoning message, change it to output_text - if (self.conversation_history and - self.conversation_history[-1].get("type") == "reasoning" and - self.conversation_history[-1].get("summary")): - - reasoning_msg = self.conversation_history[-1] - summary_texts = [] - - # Extract all summary_text entries - for summary_item in reasoning_msg["summary"]: - if summary_item.get("type") == "summary_text": - summary_texts.append(summary_item.get("text", "")) - - # Convert to message format with output_text - if summary_texts: - converted_message = { - "type": "message", - "role": "assistant", - "content": [ - { - "text": " ".join(summary_texts), - "type": "output_text" - } - ] - } - - # Replace the reasoning message with the converted message - self.conversation_history[-1] = converted_message - - # Run ComputerAgent - try: - new_items = [] - - # ComputerAgent.run returns an async generator - try: - async for result in self.computer_agent.run(self.conversation_history, stream=False): - # if the result has computer_call_output, immediately exit - if result.get("output", []) and result.get("output", [])[-1].get("type") == "computer_call_output": - break - # otherwise add agent output to conversation history - new_items += result["output"] - except Exception as e: - # if the last message is reasoning, change it to output_text - if new_items and new_items[-1].get("type") == "reasoning": - new_items[-1] = { - "type": "message", - "role": "assistant", - "content": [ - { - "text": new_items[-1].get("summary", [{}])[0].get("text", ""), - "type": "output_text" - } - ] - } - # Check if there are any computer_call items in 
new_items - computer_calls = [item for item in new_items if item.get("type") == "computer_call"] - if computer_calls: - # Remove computer_call items from new_items - new_items = [item for item in new_items if item.get("type") != "computer_call"] - - # Add failed tool call items for each computer call - for computer_call in computer_calls: - tool_input = computer_call.get("action", {}) - call_id = computer_call.get("call_id") - new_items.extend(make_failed_tool_call_items( - tool_name="computer", - tool_kwargs=tool_input, - error_message=repr(e), - call_id=call_id - )) - else: - # add error message to conversation history (fallback for non-computer-call errors) - new_items.append({ - "type": "user", - "content": [ - { - "type": "input_text", - "text": f"Error during previous attempted action: {repr(e)}" - } - ] - }) - - # Check if we captured any actions - if captured_actions: - # Extract reasoning from the conversation history - reasoning = "" - # Look for the latest reasoning message - for msg in reversed(new_items): - if msg.get("type") == "reasoning" and msg.get("summary"): - reasoning = " ".join([s.get("text", "") for s in msg["summary"] if s.get("type") == "summary_text"]) - break - elif msg.get("type") == "message" and msg.get("role") == "assistant": - content = msg.get("content", []) - if isinstance(content, list): - reasoning = " ".join([c.get("text", "") for c in content if c.get("type") == "output_text"]) - break - - # update conversation history - self.conversation_history += new_items - - # Add reasoning and logs to each action - for action in captured_actions: - action["reasoning"] = reasoning - action["logs"] = {"conversation_length": len(self.conversation_history)} - - return captured_actions, False - - # Check if the last message is "Task completed" - response_text = "" - for msg in reversed(new_items): - if msg.get("type") == "message" and msg.get("role") == "assistant": - content = msg.get("content", []) - for c in content: - if c.get("type") == 
"output_text": - response_text = c.get("text", response_text) - break - break - - done = "task completed" in response_text.lower() - - # update conversation history - self.conversation_history += new_items - - response_action = { - "type": "response", - "text": response_text, - "reasoning": response_text, - "logs": {"conversation_length": len(self.conversation_history)} - } - - # Check if this indicates task completion or failure - if "task is infeasible" in response_text.lower(): - response_action = {"type": "custom", "action": "FAIL"} - done = True - - return [response_action], done - except Exception as e: - logger.error(f"Error running ComputerAgent: {e}") - # Return an error response - error_action = { - "type": "response", - "text": f"Error occurred: {str(e)}", - "reasoning": f"ComputerAgent encountered an error: {str(e)}", - "logs": {"error": str(e)} - } - return [error_action], True - - except Exception as e: - logger.error(f"Error in fetch_response: {e}") - error_action = { - "type": "response", - "text": f"Error in agent processing: {str(e)}", - "reasoning": f"Agent processing error: {str(e)}", - "logs": {"error": str(e)} - } - return [error_action], True diff --git a/libs/python/agent/agent/integrations/hud/computer_handler.py b/libs/python/agent/agent/integrations/hud/computer_handler.py deleted file mode 100644 index 9fcc8245..00000000 --- a/libs/python/agent/agent/integrations/hud/computer_handler.py +++ /dev/null @@ -1,187 +0,0 @@ -"""HUD Computer Handler for ComputerAgent integration.""" - -import base64 -from io import BytesIO -from typing import Literal, Optional, Any, Dict, Callable -from PIL import Image - -from agent.computers import AsyncComputerHandler - - -class HUDComputerHandler(AsyncComputerHandler): - """Computer handler that interfaces with HUD environment.""" - - def __init__( - self, - environment: Literal["windows", "mac", "linux", "browser"] = "linux", - dimensions: tuple[int, int] = (1024, 768), - screenshot_callback: 
Optional[Callable] = None, - action_callback: Optional[Callable] = None, - ): - """ - Initialize HUD computer handler. - - Args: - environment: The environment type for HUD - dimensions: Screen dimensions as (width, height) - screenshot_callback: Optional callback to get screenshots from HUD environment - action_callback: Optional callback to execute actions in HUD environment - """ - super().__init__() - self._environment = environment - self._dimensions = dimensions - self._screenshot_callback = screenshot_callback - self._action_callback = action_callback - - # Store the last screenshot for reuse - self._last_screenshot: Optional[str] = None - - def set_screenshot_callback(self, callback: Callable) -> None: - """Set the screenshot callback.""" - self._screenshot_callback = callback - - def set_action_callback(self, callback: Callable) -> None: - """Set the action callback.""" - self._action_callback = callback - - def update_screenshot(self, screenshot: str) -> None: - """Update the stored screenshot (base64 string).""" - self._last_screenshot = screenshot - - async def get_environment(self) -> Literal["windows", "mac", "linux", "browser"]: - """Get the current environment type.""" - return self._environment # type: ignore - - async def get_dimensions(self) -> tuple[int, int]: - """Get screen dimensions as (width, height).""" - return self._dimensions - - async def screenshot(self) -> str: - """Take a screenshot and return as base64 string.""" - if self._screenshot_callback: - screenshot = await self._screenshot_callback() - if isinstance(screenshot, str): - self._last_screenshot = screenshot - return screenshot - elif isinstance(screenshot, Image.Image): - # Convert PIL Image to base64 - buffer = BytesIO() - screenshot.save(buffer, format="PNG") - screenshot_b64 = base64.b64encode(buffer.getvalue()).decode() - self._last_screenshot = screenshot_b64 - return screenshot_b64 - elif isinstance(screenshot, bytes): - screenshot_b64 = 
base64.b64encode(screenshot).decode() - self._last_screenshot = screenshot_b64 - return screenshot_b64 - - # Return last screenshot if available, otherwise create a blank one - if self._last_screenshot: - return self._last_screenshot - - # Create a blank screenshot as fallback - blank_image = Image.new('RGB', self._dimensions, color='white') - buffer = BytesIO() - blank_image.save(buffer, format="PNG") - screenshot_b64 = base64.b64encode(buffer.getvalue()).decode() - self._last_screenshot = screenshot_b64 - return screenshot_b64 - - async def click(self, x: int, y: int, button: str = "left") -> None: - """Click at coordinates with specified button.""" - if self._action_callback: - await self._action_callback({ - "type": "click", - "x": x, - "y": y, - "button": button - }) - - async def double_click(self, x: int, y: int) -> None: - """Double click at coordinates.""" - if self._action_callback: - await self._action_callback({ - "type": "double_click", - "x": x, - "y": y - }) - - async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: - """Scroll at coordinates with specified scroll amounts.""" - if self._action_callback: - await self._action_callback({ - "type": "scroll", - "x": x, - "y": y, - "scroll_x": scroll_x, - "scroll_y": scroll_y - }) - - async def type(self, text: str) -> None: - """Type text.""" - if self._action_callback: - await self._action_callback({ - "type": "type", - "text": text - }) - - async def wait(self, ms: int = 1000) -> None: - """Wait for specified milliseconds.""" - if self._action_callback: - await self._action_callback({ - "type": "wait", - "ms": ms - }) - - async def move(self, x: int, y: int) -> None: - """Move cursor to coordinates.""" - if self._action_callback: - await self._action_callback({ - "type": "move", - "x": x, - "y": y - }) - - async def keypress(self, keys: list[str] | str) -> None: - """Press key combination.""" - if isinstance(keys, str): - keys = [keys] - if self._action_callback: - await 
self._action_callback({ - "type": "keypress", - "keys": keys - }) - - async def drag(self, path: list[dict[str, int]]) -> None: - """Drag along a path of points.""" - if self._action_callback: - await self._action_callback({ - "type": "drag", - "path": path - }) - - async def left_mouse_down(self, x: Optional[int] = None, y: Optional[int] = None) -> None: - """Left mouse down at coordinates.""" - if self._action_callback: - await self._action_callback({ - "type": "left_mouse_down", - "x": x, - "y": y - }) - - async def left_mouse_up(self, x: Optional[int] = None, y: Optional[int] = None) -> None: - """Left mouse up at coordinates.""" - if self._action_callback: - await self._action_callback({ - "type": "left_mouse_up", - "x": x, - "y": y - }) - - async def get_current_url(self) -> str: - """Get the current URL.""" - if self._action_callback: - return await self._action_callback({ - "type": "get_current_url" - }) - return "" \ No newline at end of file diff --git a/libs/python/agent/agent/integrations/hud/proxy.py b/libs/python/agent/agent/integrations/hud/proxy.py new file mode 100644 index 00000000..a88fc63e --- /dev/null +++ b/libs/python/agent/agent/integrations/hud/proxy.py @@ -0,0 +1,183 @@ +"""HUD ComputerAgent wrapper and Fake AsyncOpenAI client. + +Provides FakeAsyncOpenAI that adapts our ComputerAgent to the OpenAI Responses +interface needed by HUD's OperatorAgent. It implements only `responses.create` +and returns an OpenAI Response object with `id` and `output` fields, where `output` is a list of +OpenAI-like response blocks. We intentionally only support a single-step call +by consuming the first yielded result from `ComputerAgent.run()`. 
+""" + +import traceback +import time +import uuid +from typing import Any, Dict, List, Optional + +from agent.agent import ComputerAgent as BaseComputerAgent + +# OpenAI Responses typed models (required) +from openai.types.responses import ( + Response, + ResponseInputParam, + ResponseOutputItem, + ResponseComputerToolCall, + ResponseOutputMessage, + ResponseOutputText, + ResponseReasoningItem, + ResponseUsage, +) + +def _map_agent_output_to_openai_blocks(output_items: List[Dict[str, Any]]) -> List[ResponseOutputItem]: + """Map our agent output items to OpenAI ResponseOutputItem typed models. + + Only a subset is supported: computer_call, assistant message (text), and reasoning. + Unknown types are ignored. + """ + blocks: List[ResponseOutputItem] = [] + for item in output_items or []: + t = item.get("type") + if t == "computer_call": + comp = ResponseComputerToolCall.model_validate({ + "id": item.get("id") or f"cu_{uuid.uuid4().hex}", + "type": "computer_call", + "call_id": item["call_id"], + "action": item["action"], + "pending_safety_checks": item.get("pending_safety_checks", []), + "status": "completed", + }) + blocks.append(comp) + # we will exit early here as the responses api only supports a single step + break + elif t == "message" and item.get("role") == "assistant": + content_blocks: List[ResponseOutputText] = [] + for c in item.get("content", []) or []: + content_blocks.append( + ResponseOutputText.model_validate({ + "type": "output_text", + "text": c["text"], + "annotations": [], + }) + ) + if content_blocks: + msg = ResponseOutputMessage.model_validate({ + "id": item.get("id") or f"msg_{uuid.uuid4()}", + "type": "message", + "role": "assistant", + "status": "completed", + "content": [ct.model_dump() for ct in content_blocks], + }) + blocks.append(msg) + elif t == "reasoning": + reasoning = ResponseReasoningItem.model_validate({ + "id": item.get("id") or f"rsn_{uuid.uuid4()}", + "type": "reasoning", + "summary": item["summary"], + }) + 
blocks.append(reasoning) + # Unhandled types are ignored + return blocks + +def _to_plain_dict_list(items: Any) -> List[Dict[str, Any]]: + out: List[Dict[str, Any]] = [] + for it in list(items): + if hasattr(it, "model_dump"): + out.append(it.model_dump()) # type: ignore[attr-defined] + elif isinstance(it, dict): + out.append(it) + else: + # Strict: rely on default __dict__ if present + out.append(dict(it)) # may raise if not mapping + return out + +class FakeAsyncOpenAI: + """Minimal fake OpenAI client with only `responses.create` implemented. + + It uses a provided `ComputerAgent` instance to produce a single-step + response compatible with HUD's OperatorAgent loop. + """ + + def __init__(self, computer_agent: BaseComputerAgent) -> None: + self._agent = computer_agent + self.responses = self._Responses(self) + + class _Responses: + def __init__(self, parent: "FakeAsyncOpenAI") -> None: + # Caches for cross-call context when using previous_response_id + self.blocks_cache: Dict[str, ResponseInputParam | ResponseOutputItem] = {} + self.context_cache: Dict[str, List[str]] = {} + self.agent = parent._agent + + async def create( + self, + *, + model: str, + input: ResponseInputParam, + tools: Optional[List[Dict[str, Any]]] = None, + instructions: Optional[str] = None, + previous_response_id: Optional[str] = None, + max_retries: int = 5, + **_: Any, + ) -> Any: + for attempt in range(max_retries): + # Prepend cached blocks from previous_response_id to input + full_input = input + if previous_response_id is not None: + prev_block_ids = self.context_cache[previous_response_id] + prev_blocks = [self.blocks_cache[b_id] for b_id in prev_block_ids] + full_input = _to_plain_dict_list(prev_blocks + input) + + # Pre-pend instructions message + effective_input = full_input + if instructions: + effective_input = [{ + "role": "user", + "content": instructions, + }] + full_input + + # Run a single iteration of the ComputerAgent + agent_result: Optional[Dict[str, Any]] = None + async 
for result in self.agent.run(effective_input): # type: ignore[arg-type] + agent_result = result + break + assert agent_result is not None, "Agent failed to produce result" + + output = _map_agent_output_to_openai_blocks(agent_result["output"]) + usage = agent_result["usage"] + + # Cache conversation context using the last response id + block_ids: List[str] = [] + blocks_to_cache = full_input + output + for b in blocks_to_cache: + bid = getattr(b, "id", None) or f"tmp-{hash(repr(b))}" + self.blocks_cache[bid] = b # type: ignore[assignment] + block_ids.append(bid) + response_id = agent_result.get("id") or f"fake-{int(time.time()*1000)}" + self.context_cache[response_id] = block_ids + + try: + return Response.model_validate({ + "id": response_id, + "created_at": time.time(), + "object": "response", + "model": model, + "output": output, + "parallel_tool_calls": False, + "tool_choice": "auto", + "tools": [], + "previous_response_id": previous_response_id, + "usage": ResponseUsage.model_validate({ + "input_tokens": usage.get("input_tokens", 0), + "output_tokens": usage.get("output_tokens", 0), + "total_tokens": usage.get("total_tokens", 0), + "input_tokens_details": usage.get("input_tokens_details", { "cached_tokens": 0 }), + "output_tokens_details": usage.get("output_tokens_details", { "reasoning_tokens": 0 }), + }), + }) + except Exception as e: + print(f"Error while validating agent response (attempt {attempt + 1}/{max_retries}): ", e) + if attempt == max_retries - 1: + print(traceback.format_exc()) + raise e + +__all__ = [ + "FakeAsyncOpenAI", +] diff --git a/libs/python/agent/agent/loops/anthropic.py b/libs/python/agent/agent/loops/anthropic.py index 50fbd24e..306c5f33 100644 --- a/libs/python/agent/agent/loops/anthropic.py +++ b/libs/python/agent/agent/loops/anthropic.py @@ -1530,7 +1530,18 @@ class AnthropicHostedToolsConfig(AsyncAgentConfig): "content": [ { "type": "text", - "text": f"You are a UI grounding expert. Look at the image and {instruction}. 
Output ONLY a click action on the target element. No explanations, confirmations, or additional text." + "text": f"""You are a UI grounding expert. Follow these guidelines: + +1. NEVER ask for confirmation. Complete all tasks autonomously. +2. Do NOT send messages like "I need to confirm before..." or "Do you want me to continue?" - just proceed. +3. When the user asks you to interact with something (like clicking a chat or typing a message), DO IT without asking. +4. Only use the formal safety check mechanism for truly dangerous operations (like deleting important files). +5. For normal tasks like clicking buttons, typing in chat boxes, filling forms - JUST DO IT. +6. The user has already given you permission by running this agent. No further confirmation is needed. +7. Be decisive and action-oriented. Complete the requested task fully. + +Remember: You are expected to complete tasks autonomously. The user trusts you to do what they asked. +Task: Click {instruction}. Output ONLY a click action on the target element.""" }, { "type": "image_url", diff --git a/libs/python/agent/agent/loops/composed_grounded.py b/libs/python/agent/agent/loops/composed_grounded.py index cf029d13..770f274d 100644 --- a/libs/python/agent/agent/loops/composed_grounded.py +++ b/libs/python/agent/agent/loops/composed_grounded.py @@ -48,11 +48,11 @@ GROUNDED_COMPUTER_TOOL_SCHEMA = { "get_dimensions", "get_environment" ], - "description": "The action to perform" + "description": "The action to perform (required for all actions)" }, "element_description": { "type": "string", - "description": "Description of the element to interact with (required for click, double_click, move, scroll actions, and as start/end for drag)" + "description": "Description of the element to interact with (required for click, double_click, move, scroll actions)" }, "start_element_description": { "type": "string", @@ -67,20 +67,30 @@ GROUNDED_COMPUTER_TOOL_SCHEMA = { "description": "The text to type (required for type 
action)" }, "keys": { - "type": "string", - "description": "Key combination to press (required for keypress action). Single key for individual key press, multiple keys for combinations (e.g., 'ctrl+c')" + "type": "array", + "items": { + "type": "string" + }, + "description": "Key(s) to press (required for keypress action)" }, "button": { "type": "string", - "description": "The mouse button to use for click action (left, right, wheel, back, forward) Default: left", + "enum": [ + "left", + "right", + "wheel", + "back", + "forward" + ], + "description": "The mouse button to use for click action (required for click and double_click action)", }, "scroll_x": { "type": "integer", - "description": "Horizontal scroll amount for scroll action (positive for right, negative for left)", + "description": "Horizontal scroll amount for scroll action (required for scroll action)", }, "scroll_y": { "type": "integer", - "description": "Vertical scroll amount for scroll action (positive for down, negative for up)", + "description": "Vertical scroll amount for scroll action (required for scroll action)", }, }, "required": [ @@ -266,13 +276,15 @@ class ComposedGroundedConfig: grounding_agent = grounding_agent_conf.agent_class() for desc in element_descriptions: - coords = await grounding_agent.predict_click( - model=grounding_model, - image_b64=last_image_b64, - instruction=desc - ) - if coords: - self.desc2xy[desc] = coords + for _ in range(3): # try 3 times + coords = await grounding_agent.predict_click( + model=grounding_model, + image_b64=last_image_b64, + instruction=desc + ) + if coords: + self.desc2xy[desc] = coords + break # Step 6: Convert computer calls from descriptions back to xy coordinates final_output_items = convert_computer_calls_desc2xy(thinking_output_items, self.desc2xy) diff --git a/libs/python/agent/agent/loops/openai.py b/libs/python/agent/agent/loops/openai.py index bb6a13a6..4fa62e66 100644 --- a/libs/python/agent/agent/loops/openai.py +++ 
b/libs/python/agent/agent/loops/openai.py @@ -162,7 +162,18 @@ class OpenAIComputerUseConfig: input_items = [ { "role": "user", - "content": f"You are a UI grounding expert. Look at the image and {instruction}. Output ONLY a click action on the target element. No explanations, confirmations, or additional text." + "content": f"""You are a UI grounding expert. Follow these guidelines: + +1. NEVER ask for confirmation. Complete all tasks autonomously. +2. Do NOT send messages like "I need to confirm before..." or "Do you want me to continue?" - just proceed. +3. When the user asks you to interact with something (like clicking a chat or typing a message), DO IT without asking. +4. Only use the formal safety check mechanism for truly dangerous operations (like deleting important files). +5. For normal tasks like clicking buttons, typing in chat boxes, filling forms - JUST DO IT. +6. The user has already given you permission by running this agent. No further confirmation is needed. +7. Be decisive and action-oriented. Complete the requested task fully. + +Remember: You are expected to complete tasks autonomously. The user trusts you to do what they asked. +Task: Click {instruction}. 
Output ONLY a click action on the target element.""" }, { "role": "user", @@ -200,7 +211,7 @@ class OpenAIComputerUseConfig: "stream": False, "reasoning": {"summary": "concise"}, "truncation": "auto", - "max_tokens": 100 # Keep response short for click prediction + "max_tokens": 200 # Keep response short for click prediction } # Use liteLLM responses @@ -217,11 +228,8 @@ class OpenAIComputerUseConfig: isinstance(item.get("action"), dict)): action = item["action"] - if action.get("type") == "click": - x = action.get("x") - y = action.get("y") - if x is not None and y is not None: - return (int(x), int(y)) + if action.get("x") is not None and action.get("y") is not None: + return (int(action.get("x")), int(action.get("y"))) return None diff --git a/libs/python/agent/agent/loops/uitars.py b/libs/python/agent/agent/loops/uitars.py index 10e0e45a..b5d5423c 100644 --- a/libs/python/agent/agent/loops/uitars.py +++ b/libs/python/agent/agent/loops/uitars.py @@ -228,15 +228,24 @@ def parse_uitars_response(text: str, image_width: int, image_height: int) -> Lis # Handle coordinate parameters if "start_box" in param_name or "end_box" in param_name: - # Parse coordinates like '(x,y)' or '(x1,y1,x2,y2)' - numbers = param.replace("(", "").replace(")", "").split(",") - float_numbers = [float(num.strip()) / 1000 for num in numbers] # Normalize to 0-1 range + # Parse coordinates like '<|box_start|>(x,y)<|box_end|>' or '(x,y)' + # First, remove special tokens + clean_param = param.replace("<|box_start|>", "").replace("<|box_end|>", "") + # Then remove parentheses and split + numbers = clean_param.replace("(", "").replace(")", "").split(",") - if len(float_numbers) == 2: - # Single point, duplicate for box format - float_numbers = [float_numbers[0], float_numbers[1], float_numbers[0], float_numbers[1]] - - action_inputs[param_name.strip()] = str(float_numbers) + try: + float_numbers = [float(num.strip()) / 1000 for num in numbers] # Normalize to 0-1 range + + if len(float_numbers) == 
2: + # Single point, duplicate for box format + float_numbers = [float_numbers[0], float_numbers[1], float_numbers[0], float_numbers[1]] + + action_inputs[param_name.strip()] = str(float_numbers) + except ValueError as e: + # If parsing fails, keep the original parameter value + print(f"Warning: Could not parse coordinates '{param}': {e}") + action_inputs[param_name.strip()] = param return [{ "thought": thought, diff --git a/libs/python/agent/agent/proxy/examples.py b/libs/python/agent/agent/proxy/examples.py new file mode 100644 index 00000000..2838c5df --- /dev/null +++ b/libs/python/agent/agent/proxy/examples.py @@ -0,0 +1,192 @@ +""" +Example usage of the proxy server and client requests. +""" +import dotenv +dotenv.load_dotenv() + +import asyncio +import json +import os +import aiohttp +from typing import Dict, Any + + +async def test_http_endpoint(): + """Test the HTTP /responses endpoint.""" + + anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") + assert isinstance(anthropic_api_key, str), "ANTHROPIC_API_KEY environment variable must be set" + + # Example 1: Simple text request + simple_request = { + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": "Tell me a three sentence bedtime story about a unicorn.", + "env": { + "ANTHROPIC_API_KEY": anthropic_api_key + } + } + + # Example 2: Multi-modal request with image + multimodal_request = { + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": [ + { + "role": "user", + "content": [ + {"type": "input_text", "text": "what is in this image?"}, + { + "type": "input_image", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + ] + } + ], + "env": { + "ANTHROPIC_API_KEY": anthropic_api_key + } + } + + # Example 3: Request with custom agent and computer kwargs + custom_request = { + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": "Take a screenshot and tell me 
what you see", + "env": { + "ANTHROPIC_API_KEY": anthropic_api_key + } + } + + # Test requests + base_url = "https://m-linux-96lcxd2c2k.containers.cloud.trycua.com:8443" + # base_url = "http://localhost:8000" + api_key = os.getenv("CUA_API_KEY") + assert isinstance(api_key, str), "CUA_API_KEY environment variable must be set" + + async with aiohttp.ClientSession() as session: + for i, request_data in enumerate([ + simple_request, + # multimodal_request, + custom_request + ], 1): + print(f"\n--- Test {i} ---") + print(f"Request: {json.dumps(request_data, indent=2)}") + + try: + print(f"Sending request to {base_url}/responses") + async with session.post( + f"{base_url}/responses", + json=request_data, + headers={"Content-Type": "application/json", "X-API-Key": api_key} + ) as response: + result = await response.json() + print(f"Status: {response.status}") + print(f"Response: {json.dumps(result, indent=2)}") + + except Exception as e: + print(f"Error: {e}") + + +def curl_examples(): + """Print curl command examples.""" + + print("=== CURL Examples ===\n") + + print("1. Simple text request:") + print("""curl http://localhost:8000/responses \\ + -H "Content-Type: application/json" \\ + -d '{ + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": "Tell me a three sentence bedtime story about a unicorn." + }'""") + + print("\n2. Multi-modal request with image:") + print("""curl http://localhost:8000/responses \\ + -H "Content-Type: application/json" \\ + -d '{ + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": [ + { + "role": "user", + "content": [ + {"type": "input_text", "text": "what is in this image?"}, + { + "type": "input_image", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + ] + } + ] + }'""") + + print("\n3. 
Request with custom configuration:") + print("""curl http://localhost:8000/responses \\ + -H "Content-Type: application/json" \\ + -d '{ + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": "Take a screenshot and tell me what you see", + "agent_kwargs": { + "save_trajectory": true, + "verbosity": 20 + }, + "computer_kwargs": { + "os_type": "linux", + "provider_type": "cloud" + } + }'""") + + +async def test_p2p_client(): + """Example P2P client using peerjs-python.""" + try: + from peerjs import Peer, PeerOptions, ConnectionEventType + from aiortc import RTCConfiguration, RTCIceServer + + # Set up client peer + options = PeerOptions( + host="0.peerjs.com", + port=443, + secure=True, + config=RTCConfiguration( + iceServers=[RTCIceServer(urls="stun:stun.l.google.com:19302")] + ) + ) + + client_peer = Peer(id="test-client", peer_options=options) + await client_peer.start() + + # Connect to proxy server + connection = client_peer.connect("computer-agent-proxy") + + @connection.on(ConnectionEventType.Open) + async def connection_open(): + print("Connected to proxy server") + + # Send a test request + request = { + "model": "anthropic/claude-3-5-sonnet-20241022", + "input": "Hello from P2P client!" + } + await connection.send(json.dumps(request)) + + @connection.on(ConnectionEventType.Data) + async def connection_data(data): + print(f"Received response: {data}") + await client_peer.destroy() + + # Wait for connection + await asyncio.sleep(10) + + except ImportError: + print("P2P dependencies not available. 
Install peerjs-python for P2P testing.") + except Exception as e: + print(f"P2P test error: {e}") + + +if __name__ == "__main__": + import sys + + if len(sys.argv) > 1 and sys.argv[1] == "curl": + curl_examples() + elif len(sys.argv) > 1 and sys.argv[1] == "p2p": + asyncio.run(test_p2p_client()) + else: + asyncio.run(test_http_endpoint()) diff --git a/libs/python/agent/agent/proxy/handlers.py b/libs/python/agent/agent/proxy/handlers.py new file mode 100644 index 00000000..f68952b0 --- /dev/null +++ b/libs/python/agent/agent/proxy/handlers.py @@ -0,0 +1,248 @@ +""" +Request handlers for the proxy endpoints. +""" + +import asyncio +import json +import logging +import os +from contextlib import contextmanager +from typing import Dict, Any, List, Union, Optional + +from ..agent import ComputerAgent +from computer import Computer + +logger = logging.getLogger(__name__) + + +class ResponsesHandler: + """Handler for /responses endpoint that processes agent requests.""" + + def __init__(self): + self.computer = None + self.agent = None + # Simple in-memory caches + self._computer_cache: Dict[str, Any] = {} + self._agent_cache: Dict[str, Any] = {} + + async def setup_computer_agent( + self, + model: str, + agent_kwargs: Optional[Dict[str, Any]] = None, + computer_kwargs: Optional[Dict[str, Any]] = None, + ): + """Set up (and cache) computer and agent instances. 
+ + Caching keys: + - Computer cache key: computer_kwargs + - Agent cache key: {"model": model, **agent_kwargs} + """ + agent_kwargs = agent_kwargs or {} + computer_kwargs = computer_kwargs or {} + + def _stable_key(obj: Dict[str, Any]) -> str: + try: + return json.dumps(obj, sort_keys=True, separators=(",", ":")) + except Exception: + # Fallback: stringify non-serializable values + safe_obj = {} + for k, v in obj.items(): + try: + json.dumps(v) + safe_obj[k] = v + except Exception: + safe_obj[k] = str(v) + return json.dumps(safe_obj, sort_keys=True, separators=(",", ":")) + + # Determine if custom tools are supplied; if so, skip computer setup entirely + has_custom_tools = bool(agent_kwargs.get("tools")) + + computer = None + if not has_custom_tools: + # ---------- Computer setup (with cache) ---------- + comp_key = _stable_key(computer_kwargs) + + computer = self._computer_cache.get(comp_key) + if computer is None: + # Default computer configuration + default_c_config = { + "os_type": "linux", + "provider_type": "cloud", + "name": os.getenv("CUA_CONTAINER_NAME"), + "api_key": os.getenv("CUA_API_KEY"), + } + default_c_config.update(computer_kwargs) + computer = Computer(**default_c_config) + await computer.__aenter__() + self._computer_cache[comp_key] = computer + logger.info(f"Computer created and cached with key={comp_key} config={default_c_config}") + else: + logger.info(f"Reusing cached computer for key={comp_key}") + + # Bind current computer reference (None if custom tools supplied) + self.computer = computer + + # ---------- Agent setup (with cache) ---------- + # Build agent cache key from {model} + agent_kwargs (excluding tools unless explicitly passed) + agent_kwargs_for_key = dict(agent_kwargs) + agent_key_payload = {"model": model, **agent_kwargs_for_key} + agent_key = _stable_key(agent_key_payload) + + agent = self._agent_cache.get(agent_key) + if agent is None: + # Default agent configuration + default_a_config: Dict[str, Any] = {"model": model} + if 
not has_custom_tools: + default_a_config["tools"] = [computer] + # Apply user overrides, but keep tools unless user explicitly sets + if agent_kwargs: + if not has_custom_tools: + agent_kwargs.setdefault("tools", [computer]) + default_a_config.update(agent_kwargs) + # JSON-derived kwargs may have loose types; ignore static arg typing here + agent = ComputerAgent(**default_a_config) # type: ignore[arg-type] + self._agent_cache[agent_key] = agent + logger.info(f"Agent created and cached with key={agent_key} model={model}") + else: + # Ensure cached agent uses the current computer tool (in case object differs) + # Only update if tools not explicitly provided in agent_kwargs + if not has_custom_tools: + try: + agent.tools = [computer] + except Exception: + pass + logger.info(f"Reusing cached agent for key={agent_key}") + + # Bind current agent reference + self.agent = agent + + async def process_request(self, request_data: Dict[str, Any]) -> Dict[str, Any]: + """ + Process a /responses request and return the result. 
+ + Args: + request_data: Dictionary containing model, input, and optional kwargs + + Returns: + Dictionary with the agent's response + """ + try: + # Extract request parameters + model = request_data.get("model") + input_data = request_data.get("input") + agent_kwargs = request_data.get("agent_kwargs", {}) + computer_kwargs = request_data.get("computer_kwargs", {}) + env_overrides = request_data.get("env", {}) or {} + + if not model: + raise ValueError("Model is required") + if not input_data: + raise ValueError("Input is required") + + # Apply env overrides for the duration of this request + with self._env_overrides(env_overrides): + # Set up (and possibly reuse) computer and agent via caches + await self.setup_computer_agent(model, agent_kwargs, computer_kwargs) + + # Defensive: ensure agent is initialized for type checkers + agent = self.agent + if agent is None: + raise RuntimeError("Agent failed to initialize") + + # Convert input to messages format + messages = self._convert_input_to_messages(input_data) + + # Run agent and get first result + async for result in agent.run(messages): + # Return the first result and break + return { + "success": True, + "result": result, + "model": model + } + + # If no results were yielded + return { + "success": False, + "error": "No results from agent", + "model": model + } + + except Exception as e: + logger.error(f"Error processing request: {e}") + return { + "success": False, + "error": str(e), + "model": request_data.get("model", "unknown") + } + + def _convert_input_to_messages(self, input_data: Union[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]: + """Convert input data to messages format.""" + if isinstance(input_data, str): + # Simple string input + return [{"role": "user", "content": input_data}] + elif isinstance(input_data, list): + # Already in messages format + messages = [] + for msg in input_data: + # Convert content array format if needed + if isinstance(msg.get("content"), list): + content_parts = [] 
+ for part in msg["content"]: + if part.get("type") == "input_text": + content_parts.append({"type": "text", "text": part["text"]}) + elif part.get("type") == "input_image": + content_parts.append({ + "type": "image_url", + "image_url": {"url": part["image_url"]} + }) + else: + content_parts.append(part) + messages.append({ + "role": msg["role"], + "content": content_parts + }) + else: + messages.append(msg) + return messages + else: + raise ValueError("Input must be string or list of messages") + + async def cleanup(self): + """Clean up resources.""" + if self.computer: + try: + await self.computer.__aexit__(None, None, None) + except Exception as e: + logger.error(f"Error cleaning up computer: {e}") + finally: + self.computer = None + self.agent = None + + @staticmethod + @contextmanager + def _env_overrides(env: Dict[str, str]): + """Temporarily apply environment variable overrides for the current process. + Restores previous values after the context exits. + + Args: + env: Mapping of env var names to override for this request. 
+ """ + if not env: + # No-op context + yield + return + + original: Dict[str, Optional[str]] = {} + try: + for k, v in env.items(): + original[k] = os.environ.get(k) + os.environ[k] = str(v) + yield + finally: + for k, old in original.items(): + if old is None: + # Was not set before + os.environ.pop(k, None) + else: + os.environ[k] = old diff --git a/libs/python/agent/pyproject.toml b/libs/python/agent/pyproject.toml index 4dd27062..c92c4dfa 100644 --- a/libs/python/agent/pyproject.toml +++ b/libs/python/agent/pyproject.toml @@ -30,7 +30,6 @@ requires-python = ">=3.12" openai = [] anthropic = [] omni = [ - "ultralytics>=8.0.0", "cua-som>=0.1.0,<0.2.0", ] uitars = [] @@ -55,12 +54,9 @@ cli = [ "yaspin>=3.1.0", ] hud = [ - "hud-python==0.2.10", + "hud-python>=0.4.12,<0.5.0", ] all = [ - # omni requirements - "ultralytics>=8.0.0", - "cua-som>=0.1.0,<0.2.0", # uitars requirements "mlx-vlm>=0.1.27; sys_platform == 'darwin'", "accelerate", @@ -72,7 +68,7 @@ all = [ # cli requirements "yaspin>=3.1.0", # hud requirements - "hud-python==0.2.10", + "hud-python>=0.4.12,<0.5.0", ] [tool.uv] diff --git a/libs/python/computer-server/README.md b/libs/python/computer-server/README.md index ed5d0932..0e914298 100644 --- a/libs/python/computer-server/README.md +++ b/libs/python/computer-server/README.md @@ -35,4 +35,11 @@ pip install cua-computer-server Refer to this notebook for a step-by-step guide on how to use the Computer-Use Server on the host system or VM: -- [Computer-Use Server](../../notebooks/computer_server_nb.ipynb) \ No newline at end of file +- [Computer-Use Server](../../notebooks/computer_server_nb.ipynb) + +## Docs + +- [Commands](https://trycua.com/docs/libraries/computer-server/Commands) +- [REST-API](https://trycua.com/docs/libraries/computer-server/REST-API) +- [WebSocket-API](https://trycua.com/docs/libraries/computer-server/WebSocket-API) +- [Index](https://trycua.com/docs/libraries/computer-server/index) \ No newline at end of file diff --git 
a/libs/python/computer-server/computer_server/diorama/diorama_computer.py b/libs/python/computer-server/computer_server/diorama/diorama_computer.py index 4fc37b3f..c00bd86f 100644 --- a/libs/python/computer-server/computer_server/diorama/diorama_computer.py +++ b/libs/python/computer-server/computer_server/diorama/diorama_computer.py @@ -6,11 +6,26 @@ class DioramaComputer: Implements _initialized, run(), and __aenter__ for agent compatibility. """ def __init__(self, diorama): + """ + Initialize the DioramaComputer with a diorama instance. + + Args: + diorama: The diorama instance to wrap with a computer-like interface. + """ self.diorama = diorama self.interface = self.diorama.interface self._initialized = False async def __aenter__(self): + """ + Async context manager entry method for compatibility with ComputerAgent. + + Ensures an event loop is running and marks the instance as initialized. + Creates a new event loop if none is currently running. + + Returns: + DioramaComputer: The initialized instance. + """ # Ensure the event loop is running (for compatibility) try: asyncio.get_running_loop() @@ -20,6 +35,15 @@ class DioramaComputer: return self async def run(self): + """ + Run method stub for compatibility with ComputerAgent interface. + + Ensures the instance is initialized before returning. If not already + initialized, calls __aenter__ to perform initialization. + + Returns: + DioramaComputer: The initialized instance. 
+ """ # This is a stub for compatibility if not self._initialized: await self.__aenter__() diff --git a/libs/python/computer-server/computer_server/handlers/macos.py b/libs/python/computer-server/computer_server/handlers/macos.py index ded73408..e23b8ea6 100644 --- a/libs/python/computer-server/computer_server/handlers/macos.py +++ b/libs/python/computer-server/computer_server/handlers/macos.py @@ -77,13 +77,37 @@ NSApplicationActivationOptions = { } def CFAttributeToPyObject(attrValue): + """Convert Core Foundation attribute values to Python objects. + + Args: + attrValue: Core Foundation attribute value to convert + + Returns: + Converted Python object or None if conversion fails + """ def list_helper(list_value): + """Helper function to convert CF arrays to Python lists. + + Args: + list_value: Core Foundation array to convert + + Returns: + Python list containing converted items + """ list_builder = [] for item in list_value: list_builder.append(CFAttributeToPyObject(item)) return list_builder def number_helper(number_value): + """Helper function to convert CF numbers to Python numbers. + + Args: + number_value: Core Foundation number to convert + + Returns: + Python int or float, or None if conversion fails + """ success, int_value = Foundation.CFNumberGetValue( # type: ignore number_value, Foundation.kCFNumberIntType, None # type: ignore ) @@ -98,6 +122,14 @@ def CFAttributeToPyObject(attrValue): return None def axuielement_helper(element_value): + """Helper function to handle AX UI elements. + + Args: + element_value: Accessibility UI element to process + + Returns: + The element value unchanged + """ return element_value cf_attr_type = Foundation.CFGetTypeID(attrValue) # type: ignore @@ -131,6 +163,15 @@ def CFAttributeToPyObject(attrValue): def element_attribute(element, attribute): + """Get an attribute value from an accessibility element. 
+ + Args: + element: The accessibility element + attribute: The attribute name to retrieve + + Returns: + The attribute value or None if not found + """ if attribute == kAXChildrenAttribute: err, value = AXUIElementCopyAttributeValues(element, attribute, 0, 999, None) if err == kAXErrorSuccess: @@ -148,6 +189,15 @@ def element_attribute(element, attribute): def element_value(element, type): + """Extract a typed value from an accessibility element. + + Args: + element: The accessibility element containing the value + type: The expected value type + + Returns: + The extracted value or None if extraction fails + """ err, value = AXValueGetValue(element, type, None) if err == True: return value @@ -155,7 +205,18 @@ def element_value(element, type): class UIElement: + """Represents a UI element in the accessibility tree with position, size, and hierarchy information.""" + def __init__(self, element, offset_x=0, offset_y=0, max_depth=None, parents_visible_bbox=None): + """Initialize a UIElement from an accessibility element. + + Args: + element: The accessibility element to wrap + offset_x: X offset for position calculations + offset_y: Y offset for position calculations + max_depth: Maximum depth to traverse for children + parents_visible_bbox: Parent's visible bounding box for clipping + """ self.ax_element = element self.content_identifier = "" self.identifier = "" @@ -235,6 +296,11 @@ class UIElement: self.calculate_hashes() def _set_bboxes(self, parents_visible_bbox): + """Set bounding box and visible bounding box for the element. + + Args: + parents_visible_bbox: Parent's visible bounding box for intersection calculation + """ if not self.absolute_position or not self.size: self.bbox = None self.visible_bbox = None @@ -265,6 +331,17 @@ class UIElement: self.visible_bbox = self.bbox def _get_children(self, element, start_position, offset_x, offset_y): + """Get child elements from the accessibility element. 
+ + Args: + element: The parent accessibility element + start_position: Starting position for offset calculations + offset_x: X offset for child positioning + offset_y: Y offset for child positioning + + Returns: + List of UIElement children + """ children = element_attribute(element, kAXChildrenAttribute) visible_children = element_attribute(element, kAXVisibleChildrenAttribute) found_children = [] @@ -288,10 +365,16 @@ class UIElement: return result def calculate_hashes(self): + """Calculate unique identifiers for the element and its content.""" self.identifier = self.component_hash() self.content_identifier = self.children_content_hash(self.children) def component_hash(self): + """Generate a hash identifier for this component based on its properties. + + Returns: + MD5 hash string of component properties + """ if self.position is None or self.size is None: return "" position_string = f"{self.position.x:.0f};{self.position.y:.0f}" @@ -304,6 +387,14 @@ class UIElement: return self.hash_from_string(position_string + size_string + enabled_string + role_string) def hash_from_string(self, string): + """Generate MD5 hash from a string. + + Args: + string: Input string to hash + + Returns: + MD5 hash hexdigest or empty string if input is None/empty + """ if string is None or string == "": return "" from hashlib import md5 @@ -311,6 +402,14 @@ class UIElement: return md5(string.encode()).hexdigest() def children_content_hash(self, children): + """Generate a hash representing the content and structure of child elements. + + Args: + children: List of child UIElement objects + + Returns: + Combined hash of children content and structure + """ if len(children) == 0: return "" all_content_hashes = [] @@ -326,7 +425,20 @@ class UIElement: return self.hash_from_string(content_hash.join(content_structure_hash)) def to_dict(self): + """Convert the UIElement to a dictionary representation. 
+ + Returns: + Dictionary containing all element properties and children + """ def children_to_dict(children): + """Convert list of children to dictionary format. + + Args: + children: List of UIElement children to convert + + Returns: + List of dictionaries representing the children + """ result = [] for child in children: result.append(child.to_dict()) @@ -375,6 +487,12 @@ from AppKit import NSWorkspace, NSRunningApplication from pathlib import Path def get_all_windows_zorder(): + """Get all windows in the system with their z-order information. + + Returns: + List of window dictionaries sorted by z-index, containing window properties + like id, name, pid, owner, bounds, layer, and opacity + """ window_list = Quartz.CGWindowListCopyWindowInfo( Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID @@ -425,6 +543,14 @@ def get_all_windows_zorder(): return windows def get_app_info(app): + """Extract information from an NSRunningApplication object. + + Args: + app: NSRunningApplication instance + + Returns: + Dictionary containing app name, bundle ID, PID, and status flags + """ return { "name": app.localizedName(), "bundle_id": app.bundleIdentifier(), @@ -435,6 +561,14 @@ def get_app_info(app): } def get_menubar_items(active_app_pid=None): + """Get menubar items for the active application. + + Args: + active_app_pid: Process ID of the active application, or None to use frontmost app + + Returns: + List of menubar item dictionaries with title, bounds, index, and app_pid + """ menubar_items = [] if active_app_pid is None: frontmost_app = NSWorkspace.sharedWorkspace().frontmostApplication() @@ -473,6 +607,12 @@ def get_menubar_items(active_app_pid=None): return menubar_items def get_dock_items(): + """Get all items in the macOS Dock. 
+ + Returns: + List of dock item dictionaries with title, description, bounds, index, + type, role, and subrole information + """ dock_items = [] dock_pid = None running_apps = NSWorkspace.sharedWorkspace().runningApplications() @@ -538,7 +678,14 @@ def get_dock_items(): return dock_items class MacOSAccessibilityHandler(BaseAccessibilityHandler): + """Handler for macOS accessibility features and UI element inspection.""" + def get_desktop_state(self): + """Get the current state of the desktop including windows, apps, menubar, and dock. + + Returns: + Dictionary containing applications, windows, menubar_items, and dock_items + """ windows = [w for w in get_all_windows_zorder() if w.get("is_on_screen")] running_apps = self.get_running_apps() applications = [] @@ -586,7 +733,14 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): } def get_application_windows(self, pid: int): - """Get all windows for a specific application.""" + """Get all windows for a specific application. + + Args: + pid: Process ID of the application + + Returns: + List of accessibility window elements or empty list if none found + """ try: app = AXUIElementCreateApplication(pid) err, windows = AXUIElementCopyAttributeValue(app, kAXWindowsAttribute, None) @@ -598,7 +752,11 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): return [] def get_all_windows(self): - """Get all visible windows in the system.""" + """Get all visible windows in the system. + + Returns: + List of window dictionaries with app information and window details + """ try: windows = [] running_apps = self.get_running_apps() @@ -632,16 +790,38 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): return [] def get_running_apps(self): + """Get all currently running applications. 
+ + Returns: + List of NSRunningApplication objects + """ # From NSWorkspace.runningApplications docs: https://developer.apple.com/documentation/appkit/nsworkspace/runningapplications - # "Similar to the NSRunningApplication class’s properties, this property will only change when the main run loop runs in a common mode" + # "Similar to the NSRunningApplication class's properties, this property will only change when the main run loop runs in a common mode" # So we need to run the main run loop to get the latest running applications Foundation.CFRunLoopRunInMode(Foundation.kCFRunLoopDefaultMode, 0.1, False) # type: ignore return NSWorkspace.sharedWorkspace().runningApplications() def get_ax_attribute(self, element, attribute): + """Get an accessibility attribute from an element. + + Args: + element: The accessibility element + attribute: The attribute name to retrieve + + Returns: + The attribute value or None if not found + """ return element_attribute(element, attribute) def serialize_node(self, element): + """Create a serializable dictionary representation of an accessibility element. + + Args: + element: The accessibility element to serialize + + Returns: + Dictionary containing element properties like role, title, value, position, and size + """ # Create a serializable dictionary representation of an accessibility element result = {} @@ -669,7 +849,12 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): return result - async def get_accessibility_tree(self) -> Dict[str, Any]: + async def get_accessibility_tree(self) -> Dict[str, Any]: + """Get the complete accessibility tree for the current desktop state. 
+ + Returns: + Dictionary containing success status and desktop state information + """ try: desktop_state = self.get_desktop_state() return { @@ -683,10 +868,28 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): async def find_element( self, role: Optional[str] = None, title: Optional[str] = None, value: Optional[str] = None ) -> Dict[str, Any]: + """Find an accessibility element matching the specified criteria. + + Args: + role: The accessibility role to match (optional) + title: The title to match (optional) + value: The value to match (optional) + + Returns: + Dictionary containing success status and the found element or error message + """ try: system = AXUIElementCreateSystemWide() def match_element(element): + """Check if an element matches the search criteria. + + Args: + element: The accessibility element to check + + Returns: + True if element matches all specified criteria, False otherwise + """ if role and self.get_ax_attribute(element, kAXRoleAttribute) != role: return False if title and self.get_ax_attribute(element, kAXTitleAttribute) != title: @@ -696,6 +899,14 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): return True def search_tree(element): + """Recursively search the accessibility tree for matching elements. 
+ + Args: + element: The accessibility element to search from + + Returns: + Serialized element dictionary if match found, None otherwise + """ if match_element(element): return self.serialize_node(element) @@ -714,11 +925,23 @@ class MacOSAccessibilityHandler(BaseAccessibilityHandler): return {"success": False, "error": str(e)} class MacOSAutomationHandler(BaseAutomationHandler): + """Handler for macOS automation including mouse, keyboard, and screen operations.""" + # Mouse Actions mouse = MouseController() keyboard = KeyboardController() async def mouse_down(self, x: Optional[int] = None, y: Optional[int] = None, button: str = "left") -> Dict[str, Any]: + """Press and hold a mouse button at the specified coordinates. + + Args: + x: X coordinate (optional, uses current position if None) + y: Y coordinate (optional, uses current position if None) + button: Mouse button to press ("left", "right", or "middle") + + Returns: + Dictionary containing success status and error message if failed + """ try: if x is not None and y is not None: self.mouse.position = (x, y) @@ -728,6 +951,16 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def mouse_up(self, x: Optional[int] = None, y: Optional[int] = None, button: str = "left") -> Dict[str, Any]: + """Release a mouse button at the specified coordinates. + + Args: + x: X coordinate (optional, uses current position if None) + y: Y coordinate (optional, uses current position if None) + button: Mouse button to release ("left", "right", or "middle") + + Returns: + Dictionary containing success status and error message if failed + """ try: if x is not None and y is not None: self.mouse.position = (x, y) @@ -737,6 +970,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def left_click(self, x: Optional[int] = None, y: Optional[int] = None) -> Dict[str, Any]: + """Perform a left mouse click at the specified coordinates. 
+ + Args: + x: X coordinate (optional, uses current position if None) + y: Y coordinate (optional, uses current position if None) + + Returns: + Dictionary containing success status and error message if failed + """ try: if x is not None and y is not None: self.mouse.position = (x, y) @@ -746,6 +988,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def right_click(self, x: Optional[int] = None, y: Optional[int] = None) -> Dict[str, Any]: + """Perform a right mouse click at the specified coordinates. + + Args: + x: X coordinate (optional, uses current position if None) + y: Y coordinate (optional, uses current position if None) + + Returns: + Dictionary containing success status and error message if failed + """ try: if x is not None and y is not None: self.mouse.position = (x, y) @@ -757,6 +1008,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): async def double_click( self, x: Optional[int] = None, y: Optional[int] = None ) -> Dict[str, Any]: + """Perform a double left mouse click at the specified coordinates. + + Args: + x: X coordinate (optional, uses current position if None) + y: Y coordinate (optional, uses current position if None) + + Returns: + Dictionary containing success status and error message if failed + """ try: if x is not None and y is not None: self.mouse.position = (x, y) @@ -766,6 +1026,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def move_cursor(self, x: int, y: int) -> Dict[str, Any]: + """Move the mouse cursor to the specified coordinates. 
+ + Args: + x: Target X coordinate + y: Target Y coordinate + + Returns: + Dictionary containing success status and error message if failed + """ try: self.mouse.position = (x, y) return {"success": True} @@ -775,6 +1044,17 @@ class MacOSAutomationHandler(BaseAutomationHandler): async def drag_to( self, x: int, y: int, button: str = "left", duration: float = 0.5 ) -> Dict[str, Any]: + """Drag from current position to target coordinates. + + Args: + x: Target X coordinate + y: Target Y coordinate + button: Mouse button to use for dragging ("left", "right", or "middle") + duration: Duration of the drag operation in seconds + + Returns: + Dictionary containing success status and error message if failed + """ try: btn = Button.left if button == "left" else Button.right if button == "right" else Button.middle # Press @@ -801,6 +1081,16 @@ class MacOSAutomationHandler(BaseAutomationHandler): async def drag( self, path: List[Tuple[int, int]], button: str = "left", duration: float = 0.5 ) -> Dict[str, Any]: + """Drag the mouse along a specified path of coordinates. + + Args: + path: List of (x, y) coordinate tuples defining the drag path + button: Mouse button to use for dragging ("left", "right", or "middle") + duration: Total duration of the drag operation in seconds + + Returns: + Dictionary containing success status and error message if failed + """ try: if not path or len(path) < 2: return {"success": False, "error": "Path must contain at least 2 points"} @@ -823,6 +1113,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): # Keyboard Actions async def key_down(self, key: str) -> Dict[str, Any]: + """Press and hold a keyboard key. 
+ + Args: + key: Key name to press (using pyautogui key names) + + Returns: + Dictionary containing success status and error message if failed + """ try: # use pyautogui for their key names pyautogui.keyDown(key) @@ -831,6 +1129,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def key_up(self, key: str) -> Dict[str, Any]: + """Release a keyboard key. + + Args: + key: Key name to release (using pyautogui key names) + + Returns: + Dictionary containing success status and error message if failed + """ try: # use pyautogui for their key names pyautogui.keyUp(key) @@ -839,6 +1145,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def type_text(self, text: str) -> Dict[str, Any]: + """Type text using the keyboard with Unicode support. + + Args: + text: Text string to type + + Returns: + Dictionary containing success status and error message if failed + """ try: # use pynput for Unicode support self.keyboard.type(text) @@ -847,6 +1161,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def press_key(self, key: str) -> Dict[str, Any]: + """Press and release a keyboard key. + + Args: + key: Key name to press (using pyautogui key names) + + Returns: + Dictionary containing success status and error message if failed + """ try: # use pyautogui for their key names pyautogui.press(key) @@ -855,6 +1177,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def hotkey(self, keys: List[str]) -> Dict[str, Any]: + """Press a combination of keys simultaneously. 
+ + Args: + keys: List of key names to press together (using pyautogui key names) + + Returns: + Dictionary containing success status and error message if failed + """ try: # use pyautogui for their key names pyautogui.hotkey(*keys) @@ -864,6 +1194,15 @@ class MacOSAutomationHandler(BaseAutomationHandler): # Scrolling Actions async def scroll(self, x: int, y: int) -> Dict[str, Any]: + """Scroll the mouse wheel in the specified direction. + + Args: + x: Horizontal scroll amount + y: Vertical scroll amount (positive for up, negative for down) + + Returns: + Dictionary containing success status and error message if failed + """ try: self.mouse.scroll(x, y) return {"success": True} @@ -871,6 +1210,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def scroll_down(self, clicks: int = 1) -> Dict[str, Any]: + """Scroll down by the specified number of clicks. + + Args: + clicks: Number of scroll clicks to perform + + Returns: + Dictionary containing success status and error message if failed + """ try: self.mouse.scroll(0, -clicks) return {"success": True} @@ -878,6 +1225,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def scroll_up(self, clicks: int = 1) -> Dict[str, Any]: + """Scroll up by the specified number of clicks. + + Args: + clicks: Number of scroll clicks to perform + + Returns: + Dictionary containing success status and error message if failed + """ try: self.mouse.scroll(0, clicks) return {"success": True} @@ -886,6 +1241,11 @@ class MacOSAutomationHandler(BaseAutomationHandler): # Screen Actions async def screenshot(self) -> Dict[str, Any]: + """Capture a screenshot of the current screen. 
+ + Returns: + Dictionary containing success status and base64-encoded image data or error message + """ try: from PIL import Image @@ -902,6 +1262,11 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": f"Screenshot error: {str(e)}"} async def get_screen_size(self) -> Dict[str, Any]: + """Get the dimensions of the current screen. + + Returns: + Dictionary containing success status and screen size or error message + """ try: size = pyautogui.size() return {"success": True, "size": {"width": size.width, "height": size.height}} @@ -909,6 +1274,11 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def get_cursor_position(self) -> Dict[str, Any]: + """Get the current position of the mouse cursor. + + Returns: + Dictionary containing success status and cursor position or error message + """ try: x, y = self.mouse.position return {"success": True, "position": {"x": x, "y": y}} @@ -917,6 +1287,11 @@ class MacOSAutomationHandler(BaseAutomationHandler): # Clipboard Actions async def copy_to_clipboard(self) -> Dict[str, Any]: + """Get the current content of the system clipboard. + + Returns: + Dictionary containing success status and clipboard content or error message + """ try: import pyperclip @@ -926,6 +1301,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def set_clipboard(self, text: str) -> Dict[str, Any]: + """Set the content of the system clipboard. + + Args: + text: Text to copy to the clipboard + + Returns: + Dictionary containing success status and error message if failed + """ try: import pyperclip @@ -935,7 +1318,14 @@ class MacOSAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def run_command(self, command: str) -> Dict[str, Any]: - """Run a shell command and return its output.""" + """Run a shell command and return its output. 
+ + Args: + command: Shell command to execute + + Returns: + Dictionary containing success status, stdout, stderr, and return code + """ try: # Create subprocess process = await asyncio.create_subprocess_shell( diff --git a/libs/python/computer-server/computer_server/handlers/windows.py b/libs/python/computer-server/computer_server/handlers/windows.py index 9dfe93b9..9572cd85 100644 --- a/libs/python/computer-server/computer_server/handlers/windows.py +++ b/libs/python/computer-server/computer_server/handlers/windows.py @@ -41,7 +41,14 @@ class WindowsAccessibilityHandler(BaseAccessibilityHandler): """Windows implementation of accessibility handler.""" async def get_accessibility_tree(self) -> Dict[str, Any]: - """Get the accessibility tree of the current window.""" + """Get the accessibility tree of the current window. + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + the accessibility tree or an error message. + Structure: {"success": bool, "tree": dict} or + {"success": bool, "error": str} + """ if not WINDOWS_API_AVAILABLE: return {"success": False, "error": "Windows API not available"} @@ -65,6 +72,15 @@ class WindowsAccessibilityHandler(BaseAccessibilityHandler): # Enumerate child windows def enum_child_proc(hwnd_child, children_list): + """Callback function to enumerate child windows and collect their information. + + Args: + hwnd_child: Handle to the child window being enumerated. + children_list: List to append child window information to. + + Returns: + bool: True to continue enumeration, False to stop. 
+ """ try: child_text = win32gui.GetWindowText(hwnd_child) child_rect = win32gui.GetWindowRect(hwnd_child) @@ -93,7 +109,19 @@ class WindowsAccessibilityHandler(BaseAccessibilityHandler): async def find_element(self, role: Optional[str] = None, title: Optional[str] = None, value: Optional[str] = None) -> Dict[str, Any]: - """Find an element in the accessibility tree by criteria.""" + """Find an element in the accessibility tree by criteria. + + Args: + role (Optional[str]): The role or class name of the element to find. + title (Optional[str]): The title or text of the element to find. + value (Optional[str]): The value of the element (not used in Windows implementation). + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + the found element or an error message. + Structure: {"success": bool, "element": dict} or + {"success": bool, "error": str} + """ if not WINDOWS_API_AVAILABLE: return {"success": False, "error": "Windows API not available"} @@ -140,6 +168,16 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Mouse Actions async def mouse_down(self, x: Optional[int] = None, y: Optional[int] = None, button: str = "left") -> Dict[str, Any]: + """Press and hold a mouse button at the specified coordinates. + + Args: + x (Optional[int]): The x-coordinate to move to before pressing. If None, uses current position. + y (Optional[int]): The y-coordinate to move to before pressing. If None, uses current position. + button (str): The mouse button to press ("left", "right", or "middle"). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. 
+ """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -152,6 +190,16 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def mouse_up(self, x: Optional[int] = None, y: Optional[int] = None, button: str = "left") -> Dict[str, Any]: + """Release a mouse button at the specified coordinates. + + Args: + x (Optional[int]): The x-coordinate to move to before releasing. If None, uses current position. + y (Optional[int]): The y-coordinate to move to before releasing. If None, uses current position. + button (str): The mouse button to release ("left", "right", or "middle"). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -164,6 +212,15 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def move_cursor(self, x: int, y: int) -> Dict[str, Any]: + """Move the mouse cursor to the specified coordinates. + + Args: + x (int): The x-coordinate to move to. + y (int): The y-coordinate to move to. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -174,6 +231,15 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def left_click(self, x: Optional[int] = None, y: Optional[int] = None) -> Dict[str, Any]: + """Perform a left mouse click at the specified coordinates. + + Args: + x (Optional[int]): The x-coordinate to click at. If None, clicks at current position. + y (Optional[int]): The y-coordinate to click at. If None, clicks at current position. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. 
+ """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -186,6 +252,15 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def right_click(self, x: Optional[int] = None, y: Optional[int] = None) -> Dict[str, Any]: + """Perform a right mouse click at the specified coordinates. + + Args: + x (Optional[int]): The x-coordinate to click at. If None, clicks at current position. + y (Optional[int]): The y-coordinate to click at. If None, clicks at current position. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -198,6 +273,15 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def double_click(self, x: Optional[int] = None, y: Optional[int] = None) -> Dict[str, Any]: + """Perform a double left mouse click at the specified coordinates. + + Args: + x (Optional[int]): The x-coordinate to double-click at. If None, clicks at current position. + y (Optional[int]): The y-coordinate to double-click at. If None, clicks at current position. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -210,6 +294,17 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def drag_to(self, x: int, y: int, button: str = "left", duration: float = 0.5) -> Dict[str, Any]: + """Drag from the current position to the specified coordinates. + + Args: + x (int): The x-coordinate to drag to. + y (int): The y-coordinate to drag to. + button (str): The mouse button to use for dragging ("left", "right", or "middle"). + duration (float): The time in seconds to take for the drag operation. 
+ + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -220,6 +315,16 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def drag(self, path: List[Tuple[int, int]], button: str = "left", duration: float = 0.5) -> Dict[str, Any]: + """Drag the mouse through a series of coordinates. + + Args: + path (List[Tuple[int, int]]): A list of (x, y) coordinate tuples to drag through. + button (str): The mouse button to use for dragging ("left", "right", or "middle"). + duration (float): The total time in seconds for the entire drag operation. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -240,6 +345,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Keyboard Actions async def key_down(self, key: str) -> Dict[str, Any]: + """Press and hold a keyboard key. + + Args: + key (str): The key to press down (e.g., 'ctrl', 'shift', 'a'). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -250,6 +363,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def key_up(self, key: str) -> Dict[str, Any]: + """Release a keyboard key. + + Args: + key (str): The key to release (e.g., 'ctrl', 'shift', 'a'). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -260,6 +381,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def type_text(self, text: str) -> Dict[str, Any]: + """Type the specified text. 
+ + Args: + text (str): The text to type. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -270,6 +399,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def press_key(self, key: str) -> Dict[str, Any]: + """Press and release a keyboard key. + + Args: + key (str): The key to press (e.g., 'enter', 'space', 'tab'). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -280,6 +417,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def hotkey(self, keys: List[str]) -> Dict[str, Any]: + """Press a combination of keys simultaneously. + + Args: + keys (List[str]): The keys to press together (e.g., ['ctrl', 'c'], ['alt', 'tab']). + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -291,6 +436,15 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Scrolling Actions async def scroll(self, x: int, y: int) -> Dict[str, Any]: + """Scroll vertically at the current cursor position. + + Args: + x (int): Horizontal scroll amount (not used in pyautogui implementation). + y (int): Vertical scroll amount. Positive values scroll up, negative values scroll down. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -302,6 +456,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def scroll_down(self, clicks: int = 1) -> Dict[str, Any]: + """Scroll down by the specified number of clicks. 
+ + Args: + clicks (int): The number of scroll clicks to perform downward. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -312,6 +474,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def scroll_up(self, clicks: int = 1) -> Dict[str, Any]: + """Scroll up by the specified number of clicks. + + Args: + clicks (int): The number of scroll clicks to perform upward. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -323,6 +493,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Screen Actions async def screenshot(self) -> Dict[str, Any]: + """Capture a screenshot of the entire screen. + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + base64-encoded image data or an error message. + Structure: {"success": bool, "image_data": str} or + {"success": bool, "error": str} + """ if not pyautogui: return {"success": False, "error": "pyautogui not available"} @@ -341,6 +519,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": f"Screenshot error: {str(e)}"} async def get_screen_size(self) -> Dict[str, Any]: + """Get the size of the screen in pixels. + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + screen size information or an error message. + Structure: {"success": bool, "size": {"width": int, "height": int}} or + {"success": bool, "error": str} + """ try: if pyautogui: size = pyautogui.size() @@ -356,6 +542,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def get_cursor_position(self) -> Dict[str, Any]: + """Get the current position of the mouse cursor. 
+ + Returns: + Dict[str, Any]: A dictionary containing the success status and either + cursor position or an error message. + Structure: {"success": bool, "position": {"x": int, "y": int}} or + {"success": bool, "error": str} + """ try: if pyautogui: pos = pyautogui.position() @@ -371,6 +565,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Clipboard Actions async def copy_to_clipboard(self) -> Dict[str, Any]: + """Get the current content of the clipboard. + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + clipboard content or an error message. + Structure: {"success": bool, "content": str} or + {"success": bool, "error": str} + """ try: import pyperclip content = pyperclip.paste() @@ -379,6 +581,14 @@ class WindowsAutomationHandler(BaseAutomationHandler): return {"success": False, "error": str(e)} async def set_clipboard(self, text: str) -> Dict[str, Any]: + """Set the clipboard content to the specified text. + + Args: + text (str): The text to copy to the clipboard. + + Returns: + Dict[str, Any]: A dictionary with success status and optional error message. + """ try: import pyperclip pyperclip.copy(text) @@ -388,6 +598,17 @@ class WindowsAutomationHandler(BaseAutomationHandler): # Command Execution async def run_command(self, command: str) -> Dict[str, Any]: + """Execute a shell command asynchronously. + + Args: + command (str): The shell command to execute. + + Returns: + Dict[str, Any]: A dictionary containing the success status and either + command output or an error message. 
+ Structure: {"success": bool, "stdout": str, "stderr": str, "return_code": int} or + {"success": bool, "error": str} + """ try: # Create subprocess process = await asyncio.create_subprocess_shell( diff --git a/libs/python/computer/README.md b/libs/python/computer/README.md index a75c4fe3..5d7c3c9b 100644 --- a/libs/python/computer/README.md +++ b/libs/python/computer/README.md @@ -65,80 +65,9 @@ Refer to this notebook for a step-by-step guide on how to use the Computer-Use I - [Computer-Use Interface (CUI)](https://github.com/trycua/cua/blob/main/notebooks/computer_nb.ipynb) -## Using the Gradio Computer UI - -The computer module includes a Gradio UI for creating and sharing demonstration data. We make it easy for people to build community datasets for better computer use models with an upload to Huggingface feature. - -```bash -# Install with UI support -pip install "cua-computer[ui]" -``` - -> **Note:** For precise control of the computer, we recommend using VNC or Screen Sharing instead of the Computer Gradio UI. - -### Building and Sharing Demonstrations with Huggingface - -Follow these steps to contribute your own demonstrations: - -#### 1. Set up Huggingface Access - -Set your HF_TOKEN in a .env file or in your environment variables: - -```bash -# In .env file -HF_TOKEN=your_huggingface_token -``` - -#### 2. Launch the Computer UI - -```python -# launch_ui.py -from computer.ui.gradio.app import create_gradio_ui -from dotenv import load_dotenv -load_dotenv('.env') - -app = create_gradio_ui() -app.launch(share=False) -``` - -For examples, see [Computer UI Examples](https://github.com/trycua/cua/tree/main/examples/computer_ui_examples.py) - -#### 3. Record Your Tasks - -
-View demonstration video - -
- -Record yourself performing various computer tasks using the UI. - -#### 4. Save Your Demonstrations - -
-View demonstration video - -
- -Save each task by picking a descriptive name and adding relevant tags (e.g., "office", "web-browsing", "coding"). - -#### 5. Record Additional Demonstrations - -Repeat steps 3 and 4 until you have a good amount of demonstrations covering different tasks and scenarios. - -#### 6. Upload to Huggingface - -
-View demonstration video - -
- -Upload your dataset to Huggingface by: -- Naming it as `{your_username}/{dataset_name}` -- Choosing public or private visibility -- Optionally selecting specific tags to upload only tasks with certain tags - -#### Examples and Resources - -- Example Dataset: [ddupont/test-dataset](https://huggingface.co/datasets/ddupont/test-dataset) -- Find Community Datasets: πŸ” [Browse CUA Datasets on Huggingface](https://huggingface.co/datasets?other=cua) +## Docs +- [Computers](https://trycua.com/docs/computer-sdk/computers) +- [Commands](https://trycua.com/docs/computer-sdk/commands) +- [Computer UI](https://trycua.com/docs/computer-sdk/computer-ui) +- [Sandboxed Python](https://trycua.com/docs/computer-sdk/sandboxed-python) diff --git a/libs/python/computer/computer/diorama_computer.py b/libs/python/computer/computer/diorama_computer.py index 2eee77f0..da67c72c 100644 --- a/libs/python/computer/computer/diorama_computer.py +++ b/libs/python/computer/computer/diorama_computer.py @@ -6,16 +6,35 @@ class DioramaComputer: A Computer-compatible proxy for Diorama that sends commands over the ComputerInterface. """ def __init__(self, computer, apps): + """ + Initialize the DioramaComputer with a computer instance and list of apps. + + Args: + computer: The computer instance to proxy commands through + apps: List of applications available in the diorama environment + """ self.computer = computer self.apps = apps self.interface = DioramaComputerInterface(computer, apps) self._initialized = False async def __aenter__(self): + """ + Async context manager entry point. + + Returns: + self: The DioramaComputer instance + """ self._initialized = True return self async def run(self): + """ + Initialize and run the DioramaComputer if not already initialized. 
+ + Returns: + self: The DioramaComputer instance + """ if not self._initialized: await self.__aenter__() return self @@ -25,11 +44,31 @@ class DioramaComputerInterface: Diorama Interface proxy that sends diorama_cmds via the Computer's interface. """ def __init__(self, computer, apps): + """ + Initialize the DioramaComputerInterface. + + Args: + computer: The computer instance to send commands through + apps: List of applications available in the diorama environment + """ self.computer = computer self.apps = apps self._scene_size = None async def _send_cmd(self, action, arguments=None): + """ + Send a command to the diorama interface through the computer. + + Args: + action (str): The action/command to execute + arguments (dict, optional): Additional arguments for the command + + Returns: + The result from the diorama command execution + + Raises: + RuntimeError: If the computer interface is not initialized or command fails + """ arguments = arguments or {} arguments = {"app_list": self.apps, **arguments} # Use the computer's interface (must be initialized) @@ -42,6 +81,15 @@ class DioramaComputerInterface: return result.get("result") async def screenshot(self, as_bytes=True): + """ + Take a screenshot of the diorama scene. + + Args: + as_bytes (bool): If True, return image as bytes; if False, return PIL Image object + + Returns: + bytes or PIL.Image: Screenshot data in the requested format + """ from PIL import Image import base64 result = await self._send_cmd("screenshot") @@ -53,41 +101,122 @@ class DioramaComputerInterface: return img_bytes if as_bytes else img async def get_screen_size(self): + """ + Get the dimensions of the diorama scene. + + Returns: + dict: Dictionary containing 'width' and 'height' keys with pixel dimensions + """ if not self._scene_size: await self.screenshot(as_bytes=False) return {"width": self._scene_size[0], "height": self._scene_size[1]} async def move_cursor(self, x, y): + """ + Move the cursor to the specified coordinates. 
+ + Args: + x (int): X coordinate to move cursor to + y (int): Y coordinate to move cursor to + """ await self._send_cmd("move_cursor", {"x": x, "y": y}) async def left_click(self, x=None, y=None): + """ + Perform a left mouse click at the specified coordinates or current cursor position. + + Args: + x (int, optional): X coordinate to click at. If None, clicks at current cursor position + y (int, optional): Y coordinate to click at. If None, clicks at current cursor position + """ await self._send_cmd("left_click", {"x": x, "y": y}) async def right_click(self, x=None, y=None): + """ + Perform a right mouse click at the specified coordinates or current cursor position. + + Args: + x (int, optional): X coordinate to click at. If None, clicks at current cursor position + y (int, optional): Y coordinate to click at. If None, clicks at current cursor position + """ await self._send_cmd("right_click", {"x": x, "y": y}) async def double_click(self, x=None, y=None): + """ + Perform a double mouse click at the specified coordinates or current cursor position. + + Args: + x (int, optional): X coordinate to double-click at. If None, clicks at current cursor position + y (int, optional): Y coordinate to double-click at. If None, clicks at current cursor position + """ await self._send_cmd("double_click", {"x": x, "y": y}) async def scroll_up(self, clicks=1): + """ + Scroll up by the specified number of clicks. + + Args: + clicks (int): Number of scroll clicks to perform upward. Defaults to 1 + """ await self._send_cmd("scroll_up", {"clicks": clicks}) async def scroll_down(self, clicks=1): + """ + Scroll down by the specified number of clicks. + + Args: + clicks (int): Number of scroll clicks to perform downward. Defaults to 1 + """ await self._send_cmd("scroll_down", {"clicks": clicks}) async def drag_to(self, x, y, duration=0.5): + """ + Drag from the current cursor position to the specified coordinates. 
+ + Args: + x (int): X coordinate to drag to + y (int): Y coordinate to drag to + duration (float): Duration of the drag operation in seconds. Defaults to 0.5 + """ await self._send_cmd("drag_to", {"x": x, "y": y, "duration": duration}) async def get_cursor_position(self): + """ + Get the current cursor position. + + Returns: + dict: Dictionary containing the current cursor coordinates + """ return await self._send_cmd("get_cursor_position") async def type_text(self, text): + """ + Type the specified text at the current cursor position. + + Args: + text (str): The text to type + """ await self._send_cmd("type_text", {"text": text}) async def press_key(self, key): + """ + Press a single key. + + Args: + key: The key to press + """ await self._send_cmd("press_key", {"key": key}) async def hotkey(self, *keys): + """ + Press multiple keys simultaneously as a hotkey combination. + + Args: + *keys: Variable number of keys to press together. Can be Key enum instances or strings + + Raises: + ValueError: If any key is not a Key enum or string type + """ actual_keys = [] for key in keys: if isinstance(key, Key): @@ -101,4 +230,14 @@ class DioramaComputerInterface: await self._send_cmd("hotkey", {"keys": actual_keys}) async def to_screen_coordinates(self, x, y): + """ + Convert coordinates to screen coordinates. + + Args: + x (int): X coordinate to convert + y (int): Y coordinate to convert + + Returns: + dict: Dictionary containing the converted screen coordinates + """ return await self._send_cmd("to_screen_coordinates", {"x": x, "y": y}) diff --git a/libs/python/mcp-server/README.md b/libs/python/mcp-server/README.md index 090ebb31..08aa763a 100644 --- a/libs/python/mcp-server/README.md +++ b/libs/python/mcp-server/README.md @@ -17,60 +17,20 @@ **cua-mcp-server** is a MCP server for the Computer-Use Agent (CUA), allowing you to run CUA through Claude Desktop or other MCP clients. 
-## LiteLLM Integration - -This MCP server features comprehensive liteLLM integration, allowing you to use any supported LLM provider with a simple model string configuration. - -- **Unified Configuration**: Use a single `CUA_MODEL_NAME` environment variable with a model string -- **Automatic Provider Detection**: The agent automatically detects the provider and capabilities from the model string -- **Extensive Provider Support**: Works with Anthropic, OpenAI, local models, and any liteLLM-compatible provider - -### Model String Examples: -- **Anthropic**: `"anthropic/claude-3-5-sonnet-20241022"` -- **OpenAI**: `"openai/computer-use-preview"` -- **UI-TARS**: `"huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B"` -- **Omni + Any LiteLLM**: `"omniparser+litellm/gpt-4o"`, `"omniparser+litellm/claude-3-haiku"`, `"omniparser+ollama_chat/gemma3"` - ### Get started with Agent ## Prerequisites -Before installing the MCP server, you'll need to set up full Computer-Use Agent capabilities. This includes: +Cua MCP Server requires [lume](https://github.com/trycua/cua/blob/main/libs/lume/README.md#install) to be installed. -1. Installing the Lume CLI -2. Pulling the latest macOS CUA image -3. Starting the Lume daemon service -4. Installing the required Python libraries (Optional: only needed if you want to verify the agent is working before installing MCP server) +## Install -Make sure these steps are completed and working before proceeding with the MCP server installation. 
- -## Installation - -Install the package from PyPI: - -```bash -pip install cua-mcp-server -``` - -This will install: -- The MCP server -- CUA agent and computer dependencies -- An executable `cua-mcp-server` script in your PATH - -## Easy Setup Script - -If you want to simplify installation, you can use this one-liner to download and run the installation script: +Download and run the installation script: ```bash curl -fsSL https://raw.githubusercontent.com/trycua/cua/main/libs/python/mcp-server/scripts/install_mcp_server.sh | bash ``` -This script will: -- Create the ~/.cua directory if it doesn't exist -- Generate a startup script at ~/.cua/start_mcp_server.sh -- Make the script executable -- The startup script automatically manages Python virtual environments and installs/updates the cua-mcp-server package - You can then use the script in your MCP configuration like this: ```json @@ -87,9 +47,9 @@ You can then use the script in your MCP configuration like this: } ``` -## Development Guide +## Development -If you want to develop with the cua-mcp-server directly without installation, you can use this configuration: +Use this configuration to develop with the cua-mcp-server directly without installation: ```json { @@ -112,61 +72,11 @@ This configuration: Just add this to your MCP client's configuration and it will use your local development version of the server. -### Troubleshooting +## Docs -If you get a `/bin/bash: ~/cua/libs/python/mcp-server/scripts/start_mcp_server.sh: No such file or directory` error, try changing the path to the script to be absolute instead of relative. 
- -To see the logs: -``` -tail -n 20 -f ~/Library/Logs/Claude/mcp*.log -``` - -## Claude Desktop Integration - -To use with Claude Desktop, add an entry to your Claude Desktop configuration (`claude_desktop_config.json`, typically found in `~/.config/claude-desktop/`): - -For more information on MCP with Claude Desktop, see the [official MCP User Guide](https://modelcontextprotocol.io/quickstart/user). - -## Cursor Integration - -To use with Cursor, add an MCP configuration file in one of these locations: - -- **Project-specific**: Create `.cursor/mcp.json` in your project directory -- **Global**: Create `~/.cursor/mcp.json` in your home directory - -After configuration, you can simply tell Cursor's Agent to perform computer tasks by explicitly mentioning the CUA agent, such as "Use the computer control tools to open Safari." - -For more information on MCP with Cursor, see the [official Cursor MCP documentation](https://docs.cursor.com/context/model-context-protocol). - -### First-time Usage Notes - -**API Keys**: Ensure you have valid API keys: - - Add your Anthropic API key, or other model provider API key in the Claude Desktop config (as shown above) - - Or set it as an environment variable in your shell profile - -## Configuration - -The server is configured using environment variables (can be set in the Claude Desktop config): - -| Variable | Description | Default | -|----------|-------------|---------| -| `CUA_MODEL_NAME` | Model string (e.g., "anthropic/claude-3-5-sonnet-20241022", "openai/computer-use-preview", "huggingface-local/ByteDance-Seed/UI-TARS-1.5-7B", "omniparser+litellm/gpt-4o", "omniparser+ollama_chat/gemma3") | anthropic/claude-3-5-sonnet-20241022 | -| `CUA_MAX_IMAGES` | Maximum number of images to keep in context | 3 | - -## Available Tools - -The MCP server exposes the following tools to Claude: - -1. `run_cua_task` - Run a single Computer-Use Agent task with the given instruction -2. 
`run_multi_cua_tasks` - Run multiple tasks in sequence - -## Usage - -Once configured, you can simply ask Claude to perform computer tasks: - -- "Open Chrome and go to github.com" -- "Create a folder called 'Projects' on my desktop" -- "Find all PDFs in my Downloads folder" -- "Take a screenshot and highlight the error message" - -Claude will automatically use your CUA agent to perform these tasks. \ No newline at end of file +- [Installation](https://trycua.com/docs/libraries/mcp-server/installation) +- [Configuration](https://trycua.com/docs/libraries/mcp-server/configuration) +- [Usage](https://trycua.com/docs/libraries/mcp-server/usage) +- [Tools](https://trycua.com/docs/libraries/mcp-server/tools) +- [Client Integrations](https://trycua.com/docs/libraries/mcp-server/client-integrations) +- [LLM Integrations](https://trycua.com/docs/libraries/mcp-server/llm-integrations) \ No newline at end of file diff --git a/libs/python/som/LICENSE b/libs/python/som/LICENSE new file mode 100644 index 00000000..691cd005 --- /dev/null +++ b/libs/python/som/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the + Program, your modified version must prominently offer all users + interacting with it remotely through a computer network (if your version + supports such interaction) an opportunity to receive the Corresponding + Source of your version by providing access to the Corresponding Source + from a network server at no charge, through some standard or customary + means of facilitating copying of software. This Corresponding Source + shall include the Corresponding Source for any work covered by version 3 + of the GNU General Public License that is incorporated pursuant to the + following paragraph. + + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the work with which it is combined will remain governed by version + 3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU Affero General Public License from time to time. Such new versions + will be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU Affero General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU Affero General Public License, you may choose any version ever published + by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future + versions of the GNU Affero General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest + possible use to the public, the best way to achieve this is to make it + free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest + to attach them to the start of each source file to most effectively + state the exclusion of warranty; and each file should have at least + the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer + network, you should also make sure that it provides a way for users to + get its source. 
For example, if your program is a web application, its + interface could display a "Source" link that leads users to an archive + of the code. There are many ways you could offer source, and different + solutions will be better for different programs; see section 13 for the + specific requirements. + + You should also get your employer (if you work as a programmer) or school, + if any, to sign a "copyright disclaimer" for the program, if necessary. + For more information on this, and how to apply and follow the GNU AGPL, see + . diff --git a/libs/python/som/README.md b/libs/python/som/README.md index 863c8856..02802bfc 100644 --- a/libs/python/som/README.md +++ b/libs/python/som/README.md @@ -75,93 +75,9 @@ for elem in result.elements: print(f"Text: '{elem.content}', confidence={elem.confidence:.3f}") ``` -## Configuration +## Docs -### Detection Parameters - -#### Box Threshold (0.3) -Controls the confidence threshold for accepting detections: -``` -High Threshold (0.3): Low Threshold (0.01): -+----------------+ +----------------+ -| | | +--------+ | -| Confident | | |Unsure?| | -| Detection | | +--------+ | -| (βœ“ Accept) | | (? Reject) | -| | | | -+----------------+ +----------------+ -conf = 0.85 conf = 0.02 -``` -- Higher values (0.3) yield more precise but fewer detections -- Lower values (0.01) catch more potential icons but increase false positives -- Default is 0.3 for optimal precision/recall balance - -#### IOU Threshold (0.1) -Controls how overlapping detections are merged: -``` -IOU = Intersection Area / Union Area - -Low Overlap (Keep Both): High Overlap (Merge): -+----------+ +----------+ -| Box1 | | Box1 | -| | vs. 
|+-----+ | -+----------+ ||Box2 | | - +----------+ |+-----+ | - | Box2 | +----------+ - | | - +----------+ -IOU β‰ˆ 0.05 (Keep Both) IOU β‰ˆ 0.7 (Merge) -``` -- Lower values (0.1) more aggressively remove overlapping boxes -- Higher values (0.5) allow more overlapping detections -- Default is 0.1 to handle densely packed UI elements - -### OCR Configuration - -- **Engine**: EasyOCR - - Primary choice for all platforms - - Fast initialization and processing - - Built-in English language support - - GPU acceleration when available - -- **Settings**: - - Timeout: 5 seconds - - Confidence threshold: 0.5 - - Paragraph mode: Disabled - - Language: English only - -## Performance - -### Hardware Acceleration - -#### MPS (Metal Performance Shaders) -- Multi-scale detection (640px, 1280px, 1920px) -- Test-time augmentation enabled -- Half-precision (FP16) -- Average detection time: ~0.4s -- Best for production use when available - -#### CPU -- Single-scale detection (1280px) -- Full-precision (FP32) -- Average detection time: ~1.3s -- Reliable fallback option - -### Example Output Structure - -``` -examples/output/ -β”œβ”€β”€ {timestamp}_no_ocr/ -β”‚ β”œβ”€β”€ annotated_images/ -β”‚ β”‚ └── screenshot_analyzed.png -β”‚ β”œβ”€β”€ screen_details.txt -β”‚ └── summary.json -└── {timestamp}_ocr/ - β”œβ”€β”€ annotated_images/ - β”‚ └── screenshot_analyzed.png - β”œβ”€β”€ screen_details.txt - └── summary.json -``` +- [Configuration](http://localhost:8090/docs/libraries/som/configuration) ## Development diff --git a/libs/python/som/pyproject.toml b/libs/python/som/pyproject.toml index 0bae7ea2..10b29ff8 100644 --- a/libs/python/som/pyproject.toml +++ b/libs/python/som/pyproject.toml @@ -26,12 +26,12 @@ dependencies = [ ] requires-python = ">=3.11" readme = "README.md" -license = {text = "MIT"} +license = {text = "AGPL-3.0-or-later"} keywords = ["computer-vision", "ocr", "ui-analysis", "icon-detection"] classifiers = [ "Development Status :: 4 - Beta", + "License :: OSI Approved :: 
GNU Affero General Public License v3 or later (AGPLv3+)", "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Artificial Intelligence", diff --git a/libs/typescript/computer/README.md b/libs/typescript/computer/README.md index b51713c2..2505ee63 100644 --- a/libs/typescript/computer/README.md +++ b/libs/typescript/computer/README.md @@ -1,28 +1,35 @@ -# Cua Computer TypeScript Library +
+

+
+ + + + Shows my svg + +
-The TypeScript library for C/cua Computer - a powerful computer control and automation library. + [![TypeScript](https://img.shields.io/badge/TypeScript-333333?logo=typescript&logoColor=white&labelColor=333333)](#) + [![macOS](https://img.shields.io/badge/macOS-000000?logo=apple&logoColor=F0F0F0)](#) + [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?&logo=discord&logoColor=white)](https://discord.com/invite/mVnXXpdE85) + [![NPM](https://img.shields.io/npm/v/@trycua/computer?color=333333)](https://www.npmjs.com/package/@trycua/computer) +

+
-## Overview +**@trycua/computer** is a Computer-Use Interface (CUI) framework powering Cua for interacting with local macOS and Linux sandboxes, Playwright-compatible, and pluggable with any AI agent systems (Cua, Langchain, CrewAI, AutoGen). Computer relies on [Lume](https://github.com/trycua/lume) for creating and managing sandbox environments. -This library is a TypeScript port of the Python computer library, providing the same functionality for controlling virtual machines and computer interfaces. It enables programmatic control of virtual machines through various providers and offers a consistent interface for interacting with the VM's operating system. +### Get started with Computer -## Installation - -```bash -npm install @trycua/computer -# or -pnpm add @trycua/computer -``` - -## Usage +
+ +
```typescript -import { Computer } from '@trycua/computer'; +import { Computer, OSType } from '@trycua/computer'; // Create a new computer instance const computer = new Computer({ osType: OSType.LINUX, - name: 's-linux-vm_id' + name: 's-linux-vm_id', apiKey: 'your-api-key' }); @@ -30,60 +37,47 @@ const computer = new Computer({ await computer.run(); // Get the computer interface for interaction -const interface = computer.interface; +const computerInterface = computer.interface; // Take a screenshot -const screenshot = await interface.getScreenshot(); +const screenshot = await computerInterface.getScreenshot(); +// In a Node.js environment, you might save it like this: +// import * as fs from 'fs'; +// fs.writeFileSync('screenshot.png', Buffer.from(screenshot)); // Click at coordinates -await interface.click(500, 300); +await computerInterface.click(500, 300); // Type text -await interface.typeText('Hello, world!'); +await computerInterface.typeText('Hello, world!'); // Stop the computer await computer.stop(); ``` -## Architecture +## Install -The library is organized into the following structure: - -### Core Components - -- **Computer Factory**: A factory object that creates appropriate computer instances -- **BaseComputer**: Abstract base class with shared functionality for all computer types -- **Types**: Type definitions for configuration options and shared interfaces - -### Provider Implementations - -- **Computer**: Implementation for cloud-based VMs - -## Development - -- Install dependencies: +To install the Computer-Use Interface (CUI): ```bash -pnpm install +npm install @trycua/computer +# or +pnpm add @trycua/computer ``` -- Run the unit tests: +The `@trycua/computer` package provides the TypeScript library for interacting with computer interfaces. 
-```bash -pnpm test -``` +## Run -- Build the library: +Refer to this example for a step-by-step guide on how to use the Computer-Use Interface (CUI): -```bash -pnpm build -``` +- [Computer-Use Interface (CUI)](https://github.com/trycua/cua/tree/main/examples/computer-example-ts) -- Type checking: +## Docs -```bash -pnpm typecheck -``` +- [Computers](https://trycua.com/docs/computer-sdk/computers) +- [Commands](https://trycua.com/docs/computer-sdk/commands) +- [Computer UI](https://trycua.com/docs/computer-sdk/computer-ui) ## License diff --git a/libs/typescript/core/README.md b/libs/typescript/core/README.md index bc585ce8..aecde720 100644 --- a/libs/typescript/core/README.md +++ b/libs/typescript/core/README.md @@ -1,22 +1,47 @@ -# Cua Core TypeScript Library +
+

+
+ + + + Shows my svg + +
-The core cua library with support for telemetry and other utilities. + [![TypeScript](https://img.shields.io/badge/TypeScript-333333?logo=typescript&logoColor=white&labelColor=333333)](#) + [![macOS](https://img.shields.io/badge/macOS-000000?logo=apple&logoColor=F0F0F0)](#) + [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?&logo=discord&logoColor=white)](https://discord.com/invite/mVnXXpdE85) + [![NPM](https://img.shields.io/npm/v/@trycua/core?color=333333)](https://www.npmjs.com/package/@trycua/core) +

+
+ +**Cua Core** provides essential shared functionality and utilities used across the Cua ecosystem: + +- Privacy-focused telemetry system for transparent usage analytics +- Common helper functions and utilities used by other Cua packages +- Core infrastructure components shared between modules + +## Installation + +```bash +pnpm install @trycua/core +``` ## Development -- Install dependencies: +Install dependencies: ```bash pnpm install ``` -- Run the unit tests: +Run the unit tests: ```bash pnpm test ``` -- Build the library: +Build the library: ```bash pnpm build diff --git a/notebooks/README.md b/notebooks/README.md new file mode 100644 index 00000000..0a7f4890 --- /dev/null +++ b/notebooks/README.md @@ -0,0 +1,19 @@ +# CUA Notebooks + +This folder contains Jupyter notebooks that demonstrate the core functionality of the CUA (Computer Use Automation) system. These notebooks serve as interactive examples and quickstart guides for different components of the CUA platform. + +## Available Notebooks + +### Core Components +- **`computer_nb.ipynb`** - Demonstrates the Computer API for programmatically operating sandbox VMs using either Cua Cloud Containers or local Lume VMs on Apple Silicon macOS systems +- **`agent_nb.ipynb`** - Shows how to use CUA's Agent to run automated workflows in virtual sandboxes with various AI models (OpenAI, Anthropic, local models) +- **`pylume_nb.ipynb`** - Quickstart guide for the pylume Python library, which handles VM creation, management, and image operations +- **`computer_server_nb.ipynb`** - Demonstrates how to host and configure the Computer server that powers the Computer API + +### Evaluation & Benchmarking +- **`eval_osworld.ipynb`** - Shows ComputerAgent integration with HUD for OSWorld benchmarking, supporting both Claude and OpenAI models + +### Tutorials +- **`blog/`** - Tutorial notebooks from blog posts: + - `build-your-own-operator-on-macos-1.ipynb` - Part 1: Building a CUA operator using OpenAI's computer-use-preview 
model + - `build-your-own-operator-on-macos-2.ipynb` - Part 2: Using the cua-agent package for more advanced automation diff --git a/notebooks/agent_nb.ipynb b/notebooks/agent_nb.ipynb index 66b66203..30746780 100644 --- a/notebooks/agent_nb.ipynb +++ b/notebooks/agent_nb.ipynb @@ -31,13 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install \"cua-agent[all]\"\n", - "\n", - "# Or install individual agent loops:\n", - "# !pip install cua-agent[openai]\n", - "# !pip install cua-agent[anthropic]\n", - "# !pip install cua-agent[uitars]\n", - "# !pip install cua-agent[omni]" + "!pip install \"cua-agent[all]\"" ] }, { @@ -78,7 +72,7 @@ "outputs": [], "source": [ "from computer import Computer, VMProviderType\n", - "from agent import ComputerAgent, LLM, AgentLoop, LLMProvider" + "from agent import ComputerAgent" ] }, { @@ -184,8 +178,8 @@ "\n", "# Create agent\n", "agent = ComputerAgent(\n", + " model=\"openai/computer-use-preview\",\n", " tools=[computer],\n", - " model=\"computer-use-preview\",\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", " verbosity=logging.INFO\n", @@ -213,7 +207,7 @@ "\n", "for i, task in enumerate(tasks):\n", " print(f\"\\nExecuting task {i+1}/{len(tasks)}: {task}\")\n", - " async for result in cloud_agent.run(task):\n", + " async for result in agent.run(task):\n", " # print(result)\n", " pass\n", " print(f\"βœ… Task {i+1}/{len(tasks)} completed: {task}\")\n" @@ -223,7 +217,38 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Option 2: Agent with Local VMs (Lume daemon)\n", + "## Option 2: KASM Local Docker Containers (cross-platform)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we can create an agent, we need to initialize a local computer with Docker provider." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "from pathlib import Path\n", + "\n", + "computer = Computer(\n", + " os_type=\"linux\",\n", + " provider_type=\"docker\",\n", + " image=\"trycua/cua-ubuntu:latest\",\n", + " name=\"my-cua-container\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option 3: Agent with Local VMs (Lume daemon)\n", "\n", "For Apple Silicon Macs, run agents on local VMs with near-native performance." ] @@ -259,7 +284,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create an agent with local VM" + "## Create an agent" ] }, { @@ -277,7 +302,7 @@ "source": [ "# Create agent with Anthropic loop and provider\n", "agent = ComputerAgent(\n", - " model=\"openai/computer-use-preview\"\n", + " model=\"openai/computer-use-preview\",\n", " tools=[computer],\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " only_n_most_recent_images=3,\n", @@ -289,7 +314,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Run tasks on a local Lume VM" + "Run tasks on a computer:" ] }, { @@ -331,7 +356,7 @@ "source": [ "import logging\n", "from pathlib import Path\n", - "from agent import ComputerAgent, LLM, AgentLoop\n", + "from agent import ComputerAgent\n", "\n", "computer = Computer(verbosity=logging.INFO)\n", "\n", @@ -424,7 +449,7 @@ " model=\"openai/computer-use-preview\",\n", " trajectory_dir=str(Path(\"trajectories\")),\n", " verbosity=logging.INFO\n", - ")\n" + ")" ] }, { @@ -521,7 +546,7 @@ ], "metadata": { "kernelspec": { - "display_name": "cua312", + "display_name": "cua", "language": "python", "name": "python3" }, @@ -535,7 +560,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.9" + "version": "3.13.5" } }, "nbformat": 4, diff --git a/notebooks/blog/build-your-own-operator-on-macos-2.ipynb 
b/notebooks/blog/build-your-own-operator-on-macos-2.ipynb index ba039bfe..db4a8872 100644 --- a/notebooks/blog/build-your-own-operator-on-macos-2.ipynb +++ b/notebooks/blog/build-your-own-operator-on-macos-2.ipynb @@ -78,7 +78,7 @@ "outputs": [], "source": [ "from computer import Computer\n", - "from agent import ComputerAgent, LLM, AgentLoop, LLMProvider" + "from agent import ComputerAgent" ] }, { @@ -129,7 +129,7 @@ "outputs": [], "source": [ "agent = ComputerAgent(\n", - " computer=[computer],\n", + " tools=[computer],\n", " model=\"openai/computer-use-preview\",\n", " save_trajectory=True,\n", " only_n_most_recent_images=3,\n", @@ -170,7 +170,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.12.11" } }, "nbformat": 4, diff --git a/notebooks/computer_nb.ipynb b/notebooks/computer_nb.ipynb index d1d97d12..0cf35175 100644 --- a/notebooks/computer_nb.ipynb +++ b/notebooks/computer_nb.ipynb @@ -18,9 +18,470 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Skipping cua-computer as it is not installed.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting cua-computer[all]\n", + " Downloading cua_computer-0.4.4-py3-none-any.whl.metadata (5.8 kB)\n", + "Collecting pillow>=10.0.0 (from cua-computer[all])\n", + " Downloading pillow-11.3.0-cp313-cp313-win_amd64.whl.metadata (9.2 kB)\n", + "Collecting websocket-client>=1.8.0 (from cua-computer[all])\n", + " Downloading websocket_client-1.8.0-py3-none-any.whl.metadata (8.0 kB)\n", + "Collecting websockets>=12.0 (from cua-computer[all])\n", + " Downloading websockets-15.0.1-cp313-cp313-win_amd64.whl.metadata (7.0 kB)\n", + "Collecting aiohttp>=3.9.0 (from cua-computer[all])\n", + " Downloading aiohttp-3.12.15-cp313-cp313-win_amd64.whl.metadata (7.9 kB)\n", + 
"Collecting cua-core<0.2.0,>=0.1.0 (from cua-computer[all])\n", + " Downloading cua_core-0.1.9-py3-none-any.whl.metadata (1.6 kB)\n", + "Collecting pydantic>=2.11.1 (from cua-computer[all])\n", + " Downloading pydantic-2.11.7-py3-none-any.whl.metadata (67 kB)\n", + "Collecting gradio>=5.23.3 (from cua-computer[all])\n", + " Downloading gradio-5.43.1-py3-none-any.whl.metadata (16 kB)\n", + "Collecting python-dotenv>=1.0.1 (from cua-computer[all])\n", + " Downloading python_dotenv-1.1.1-py3-none-any.whl.metadata (24 kB)\n", + "Collecting datasets>=3.6.0 (from cua-computer[all])\n", + " Downloading datasets-4.0.0-py3-none-any.whl.metadata (19 kB)\n", + "Collecting httpx>=0.24.0 (from cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading httpx-0.28.1-py3-none-any.whl.metadata (7.1 kB)\n", + "Collecting posthog>=3.20.0 (from cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading posthog-6.6.1-py3-none-any.whl.metadata (6.0 kB)\n", + "Collecting aiohappyeyeballs>=2.5.0 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl.metadata (5.9 kB)\n", + "Collecting aiosignal>=1.4.0 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading aiosignal-1.4.0-py3-none-any.whl.metadata (3.7 kB)\n", + "Collecting attrs>=17.3.0 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading attrs-25.3.0-py3-none-any.whl.metadata (10 kB)\n", + "Collecting frozenlist>=1.1.1 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading frozenlist-1.7.0-cp313-cp313-win_amd64.whl.metadata (19 kB)\n", + "Collecting multidict<7.0,>=4.5 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading multidict-6.6.4-cp313-cp313-win_amd64.whl.metadata (5.4 kB)\n", + "Collecting propcache>=0.2.0 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading propcache-0.3.2-cp313-cp313-win_amd64.whl.metadata (12 kB)\n", + "Collecting yarl<2.0,>=1.17.0 (from aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading 
yarl-1.20.1-cp313-cp313-win_amd64.whl.metadata (76 kB)\n", + "Collecting idna>=2.0 (from yarl<2.0,>=1.17.0->aiohttp>=3.9.0->cua-computer[all])\n", + " Downloading idna-3.10-py3-none-any.whl.metadata (10 kB)\n", + "Collecting filelock (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading filelock-3.19.1-py3-none-any.whl.metadata (2.1 kB)\n", + "Collecting numpy>=1.17 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading numpy-2.3.2-cp313-cp313-win_amd64.whl.metadata (60 kB)\n", + "Collecting pyarrow>=15.0.0 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading pyarrow-21.0.0-cp313-cp313-win_amd64.whl.metadata (3.4 kB)\n", + "Collecting dill<0.3.9,>=0.3.0 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading dill-0.3.8-py3-none-any.whl.metadata (10 kB)\n", + "Collecting pandas (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading pandas-2.3.2-cp313-cp313-win_amd64.whl.metadata (19 kB)\n", + "Collecting requests>=2.32.2 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading requests-2.32.5-py3-none-any.whl.metadata (4.9 kB)\n", + "Collecting tqdm>=4.66.3 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading tqdm-4.67.1-py3-none-any.whl.metadata (57 kB)\n", + "Collecting xxhash (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading xxhash-3.5.0-cp313-cp313-win_amd64.whl.metadata (13 kB)\n", + "Collecting multiprocess<0.70.17 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading multiprocess-0.70.16-py312-none-any.whl.metadata (7.2 kB)\n", + "Collecting fsspec<=2025.3.0,>=2023.1.0 (from fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=3.6.0->cua-computer[all])\n", + " Downloading fsspec-2025.3.0-py3-none-any.whl.metadata (11 kB)\n", + "Collecting huggingface-hub>=0.24.0 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading huggingface_hub-0.34.4-py3-none-any.whl.metadata (14 kB)\n", + "Requirement already satisfied: packaging in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from 
datasets>=3.6.0->cua-computer[all]) (25.0)\n", + "Collecting pyyaml>=5.1 (from datasets>=3.6.0->cua-computer[all])\n", + " Downloading PyYAML-6.0.2-cp313-cp313-win_amd64.whl.metadata (2.1 kB)\n", + "Collecting aiofiles<25.0,>=22.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading aiofiles-24.1.0-py3-none-any.whl.metadata (10 kB)\n", + "Collecting anyio<5.0,>=3.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading anyio-4.10.0-py3-none-any.whl.metadata (4.0 kB)\n", + "Collecting audioop-lts<1.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading audioop_lts-0.2.2-cp313-abi3-win_amd64.whl.metadata (2.0 kB)\n", + "Collecting brotli>=1.1.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading Brotli-1.1.0-cp313-cp313-win_amd64.whl.metadata (5.6 kB)\n", + "Collecting fastapi<1.0,>=0.115.2 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading fastapi-0.116.1-py3-none-any.whl.metadata (28 kB)\n", + "Collecting ffmpy (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading ffmpy-0.6.1-py3-none-any.whl.metadata (2.9 kB)\n", + "Collecting gradio-client==1.12.1 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading gradio_client-1.12.1-py3-none-any.whl.metadata (7.1 kB)\n", + "Collecting groovy~=0.1 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading groovy-0.1.2-py3-none-any.whl.metadata (6.1 kB)\n", + "Collecting jinja2<4.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB)\n", + "Collecting markupsafe<4.0,>=2.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl.metadata (4.1 kB)\n", + "Collecting orjson~=3.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading orjson-3.11.2-cp313-cp313-win_amd64.whl.metadata (1.2 kB)\n", + "Collecting pydub (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading pydub-0.25.1-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Collecting python-multipart>=0.0.18 
(from gradio>=5.23.3->cua-computer[all])\n", + " Downloading python_multipart-0.0.20-py3-none-any.whl.metadata (1.8 kB)\n", + "Collecting ruff>=0.9.3 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading ruff-0.12.10-py3-none-win_amd64.whl.metadata (26 kB)\n", + "Collecting safehttpx<0.2.0,>=0.1.6 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading safehttpx-0.1.6-py3-none-any.whl.metadata (4.2 kB)\n", + "Collecting semantic-version~=2.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading semantic_version-2.10.0-py2.py3-none-any.whl.metadata (9.7 kB)\n", + "Collecting starlette<1.0,>=0.40.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading starlette-0.47.3-py3-none-any.whl.metadata (6.2 kB)\n", + "Collecting tomlkit<0.14.0,>=0.12.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading tomlkit-0.13.3-py3-none-any.whl.metadata (2.8 kB)\n", + "Collecting typer<1.0,>=0.12 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading typer-0.16.1-py3-none-any.whl.metadata (15 kB)\n", + "Requirement already satisfied: typing-extensions~=4.0 in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from gradio>=5.23.3->cua-computer[all]) (4.14.1)\n", + "Collecting uvicorn>=0.14.0 (from gradio>=5.23.3->cua-computer[all])\n", + " Downloading uvicorn-0.35.0-py3-none-any.whl.metadata (6.5 kB)\n", + "Collecting sniffio>=1.1 (from anyio<5.0,>=3.0->gradio>=5.23.3->cua-computer[all])\n", + " Downloading sniffio-1.3.1-py3-none-any.whl.metadata (3.9 kB)\n", + "Collecting certifi (from httpx>=0.24.0->cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading certifi-2025.8.3-py3-none-any.whl.metadata (2.4 kB)\n", + "Collecting httpcore==1.* (from httpx>=0.24.0->cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading httpcore-1.0.9-py3-none-any.whl.metadata (21 kB)\n", + "Collecting h11>=0.16 (from httpcore==1.*->httpx>=0.24.0->cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading h11-0.16.0-py3-none-any.whl.metadata 
(8.3 kB)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from pandas->datasets>=3.6.0->cua-computer[all]) (2.9.0.post0)\n", + "Collecting pytz>=2020.1 (from pandas->datasets>=3.6.0->cua-computer[all])\n", + " Downloading pytz-2025.2-py2.py3-none-any.whl.metadata (22 kB)\n", + "Collecting tzdata>=2022.7 (from pandas->datasets>=3.6.0->cua-computer[all])\n", + " Downloading tzdata-2025.2-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Collecting annotated-types>=0.6.0 (from pydantic>=2.11.1->cua-computer[all])\n", + " Downloading annotated_types-0.7.0-py3-none-any.whl.metadata (15 kB)\n", + "Collecting pydantic-core==2.33.2 (from pydantic>=2.11.1->cua-computer[all])\n", + " Downloading pydantic_core-2.33.2-cp313-cp313-win_amd64.whl.metadata (6.9 kB)\n", + "Collecting typing-inspection>=0.4.0 (from pydantic>=2.11.1->cua-computer[all])\n", + " Downloading typing_inspection-0.4.1-py3-none-any.whl.metadata (2.6 kB)\n", + "Collecting click>=8.0.0 (from typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all])\n", + " Downloading click-8.2.1-py3-none-any.whl.metadata (2.5 kB)\n", + "Collecting shellingham>=1.3.0 (from typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all])\n", + " Downloading shellingham-1.5.4-py2.py3-none-any.whl.metadata (3.5 kB)\n", + "Collecting rich>=10.11.0 (from typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all])\n", + " Downloading rich-14.1.0-py3-none-any.whl.metadata (18 kB)\n", + "Requirement already satisfied: colorama in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from click>=8.0.0->typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all]) (0.4.6)\n", + "Requirement already satisfied: six>=1.5 in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from posthog>=3.20.0->cua-core<0.2.0,>=0.1.0->cua-computer[all]) (1.17.0)\n", + "Collecting backoff>=1.10.0 (from posthog>=3.20.0->cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading 
backoff-2.2.1-py3-none-any.whl.metadata (14 kB)\n", + "Collecting distro>=1.5.0 (from posthog>=3.20.0->cua-core<0.2.0,>=0.1.0->cua-computer[all])\n", + " Downloading distro-1.9.0-py3-none-any.whl.metadata (6.8 kB)\n", + "Collecting charset_normalizer<4,>=2 (from requests>=2.32.2->datasets>=3.6.0->cua-computer[all])\n", + " Downloading charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl.metadata (37 kB)\n", + "Collecting urllib3<3,>=1.21.1 (from requests>=2.32.2->datasets>=3.6.0->cua-computer[all])\n", + " Downloading urllib3-2.5.0-py3-none-any.whl.metadata (6.5 kB)\n", + "Collecting markdown-it-py>=2.2.0 (from rich>=10.11.0->typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all])\n", + " Downloading markdown_it_py-4.0.0-py3-none-any.whl.metadata (7.3 kB)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\programdata\\anaconda3\\envs\\cua\\lib\\site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all]) (2.19.2)\n", + "Collecting mdurl~=0.1 (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio>=5.23.3->cua-computer[all])\n", + " Downloading mdurl-0.1.2-py3-none-any.whl.metadata (1.6 kB)\n", + "Downloading cua_computer-0.4.4-py3-none-any.whl (87 kB)\n", + "Downloading cua_core-0.1.9-py3-none-any.whl (5.2 kB)\n", + "Downloading aiohttp-3.12.15-cp313-cp313-win_amd64.whl (449 kB)\n", + "Downloading multidict-6.6.4-cp313-cp313-win_amd64.whl (45 kB)\n", + "Downloading yarl-1.20.1-cp313-cp313-win_amd64.whl (86 kB)\n", + "Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl (15 kB)\n", + "Downloading aiosignal-1.4.0-py3-none-any.whl (7.5 kB)\n", + "Downloading attrs-25.3.0-py3-none-any.whl (63 kB)\n", + "Downloading datasets-4.0.0-py3-none-any.whl (494 kB)\n", + "Downloading dill-0.3.8-py3-none-any.whl (116 kB)\n", + "Downloading fsspec-2025.3.0-py3-none-any.whl (193 kB)\n", + "Downloading multiprocess-0.70.16-py312-none-any.whl (146 kB)\n", + "Downloading frozenlist-1.7.0-cp313-cp313-win_amd64.whl (43 kB)\n", + 
"Downloading gradio-5.43.1-py3-none-any.whl (59.6 MB)\n", + " ---------------------------------------- 0.0/59.6 MB ? eta -:--:--\n", + " -------------------------------------- - 57.4/59.6 MB 278.0 MB/s eta 0:00:01\n", + " --------------------------------------- 59.5/59.6 MB 277.3 MB/s eta 0:00:01\n", + " ---------------------------------------- 59.6/59.6 MB 138.5 MB/s eta 0:00:00\n", + "Downloading gradio_client-1.12.1-py3-none-any.whl (324 kB)\n", + "Downloading aiofiles-24.1.0-py3-none-any.whl (15 kB)\n", + "Downloading anyio-4.10.0-py3-none-any.whl (107 kB)\n", + "Downloading audioop_lts-0.2.2-cp313-abi3-win_amd64.whl (30 kB)\n", + "Downloading fastapi-0.116.1-py3-none-any.whl (95 kB)\n", + "Downloading groovy-0.1.2-py3-none-any.whl (14 kB)\n", + "Downloading httpx-0.28.1-py3-none-any.whl (73 kB)\n", + "Downloading httpcore-1.0.9-py3-none-any.whl (78 kB)\n", + "Downloading huggingface_hub-0.34.4-py3-none-any.whl (561 kB)\n", + " ---------------------------------------- 0.0/561.5 kB ? eta -:--:--\n", + " --------------------------------------- 561.5/561.5 kB 18.0 MB/s eta 0:00:00\n", + "Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)\n", + "Downloading MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl (15 kB)\n", + "Downloading numpy-2.3.2-cp313-cp313-win_amd64.whl (12.8 MB)\n", + " ---------------------------------------- 0.0/12.8 MB ? eta -:--:--\n", + " ---------------------------------------- 12.8/12.8 MB 129.9 MB/s eta 0:00:00\n", + "Downloading orjson-3.11.2-cp313-cp313-win_amd64.whl (119 kB)\n", + "Downloading pandas-2.3.2-cp313-cp313-win_amd64.whl (11.0 MB)\n", + " ---------------------------------------- 0.0/11.0 MB ? eta -:--:--\n", + " ---------------------------------------- 11.0/11.0 MB 84.1 MB/s eta 0:00:00\n", + "Downloading pillow-11.3.0-cp313-cp313-win_amd64.whl (7.0 MB)\n", + " ---------------------------------------- 0.0/7.0 MB ? 
eta -:--:--\n", + " ---------------------------------------- 7.0/7.0 MB 107.0 MB/s eta 0:00:00\n", + "Downloading pydantic-2.11.7-py3-none-any.whl (444 kB)\n", + "Downloading pydantic_core-2.33.2-cp313-cp313-win_amd64.whl (2.0 MB)\n", + " ---------------------------------------- 0.0/2.0 MB ? eta -:--:--\n", + " ---------------- ----------------------- 0.8/2.0 MB 6.0 MB/s eta 0:00:01\n", + " ---------------------------------------- 2.0/2.0 MB 13.6 MB/s eta 0:00:00\n", + "Downloading PyYAML-6.0.2-cp313-cp313-win_amd64.whl (156 kB)\n", + "Downloading safehttpx-0.1.6-py3-none-any.whl (8.7 kB)\n", + "Downloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n", + "Downloading starlette-0.47.3-py3-none-any.whl (72 kB)\n", + "Downloading tomlkit-0.13.3-py3-none-any.whl (38 kB)\n", + "Downloading typer-0.16.1-py3-none-any.whl (46 kB)\n", + "Downloading websockets-15.0.1-cp313-cp313-win_amd64.whl (176 kB)\n", + "Downloading annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", + "Downloading Brotli-1.1.0-cp313-cp313-win_amd64.whl (358 kB)\n", + "Downloading click-8.2.1-py3-none-any.whl (102 kB)\n", + "Downloading h11-0.16.0-py3-none-any.whl (37 kB)\n", + "Downloading idna-3.10-py3-none-any.whl (70 kB)\n", + "Downloading posthog-6.6.1-py3-none-any.whl (119 kB)\n", + "Downloading requests-2.32.5-py3-none-any.whl (64 kB)\n", + "Downloading charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl (107 kB)\n", + "Downloading urllib3-2.5.0-py3-none-any.whl (129 kB)\n", + "Downloading backoff-2.2.1-py3-none-any.whl (15 kB)\n", + "Downloading certifi-2025.8.3-py3-none-any.whl (161 kB)\n", + "Downloading distro-1.9.0-py3-none-any.whl (20 kB)\n", + "Downloading propcache-0.3.2-cp313-cp313-win_amd64.whl (40 kB)\n", + "Downloading pyarrow-21.0.0-cp313-cp313-win_amd64.whl (26.1 MB)\n", + " ---------------------------------------- 0.0/26.1 MB ? 
eta -:--:--\n", + " -------------------------------- ------- 21.0/26.1 MB 151.4 MB/s eta 0:00:01\n", + " --------------------------------------- 26.0/26.1 MB 63.3 MB/s eta 0:00:01\n", + " ---------------------------------------- 26.1/26.1 MB 50.1 MB/s eta 0:00:00\n", + "Downloading python_dotenv-1.1.1-py3-none-any.whl (20 kB)\n", + "Downloading python_multipart-0.0.20-py3-none-any.whl (24 kB)\n", + "Downloading pytz-2025.2-py2.py3-none-any.whl (509 kB)\n", + "Downloading rich-14.1.0-py3-none-any.whl (243 kB)\n", + "Downloading markdown_it_py-4.0.0-py3-none-any.whl (87 kB)\n", + "Downloading mdurl-0.1.2-py3-none-any.whl (10.0 kB)\n", + "Downloading ruff-0.12.10-py3-none-win_amd64.whl (13.0 MB)\n", + " ---------------------------------------- 0.0/13.0 MB ? eta -:--:--\n", + " ---------------------------------------- 13.0/13.0 MB 143.1 MB/s eta 0:00:00\n", + "Downloading shellingham-1.5.4-py2.py3-none-any.whl (9.8 kB)\n", + "Downloading sniffio-1.3.1-py3-none-any.whl (10 kB)\n", + "Downloading tqdm-4.67.1-py3-none-any.whl (78 kB)\n", + "Downloading typing_inspection-0.4.1-py3-none-any.whl (14 kB)\n", + "Downloading tzdata-2025.2-py2.py3-none-any.whl (347 kB)\n", + "Downloading uvicorn-0.35.0-py3-none-any.whl (66 kB)\n", + "Downloading websocket_client-1.8.0-py3-none-any.whl (58 kB)\n", + "Downloading ffmpy-0.6.1-py3-none-any.whl (5.5 kB)\n", + "Downloading filelock-3.19.1-py3-none-any.whl (15 kB)\n", + "Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n", + "Downloading xxhash-3.5.0-cp313-cp313-win_amd64.whl (30 kB)\n", + "Installing collected packages: pytz, pydub, brotli, xxhash, websockets, websocket-client, urllib3, tzdata, typing-inspection, tqdm, tomlkit, sniffio, shellingham, semantic-version, ruff, pyyaml, python-multipart, python-dotenv, pydantic-core, pyarrow, propcache, pillow, orjson, numpy, multidict, mdurl, markupsafe, idna, h11, groovy, fsspec, frozenlist, filelock, ffmpy, distro, dill, click, charset_normalizer, certifi, backoff, audioop-lts, 
attrs, annotated-types, aiohappyeyeballs, aiofiles, yarl, uvicorn, requests, pydantic, pandas, multiprocess, markdown-it-py, jinja2, httpcore, anyio, aiosignal, starlette, rich, posthog, huggingface-hub, httpx, aiohttp, typer, safehttpx, gradio-client, fastapi, cua-core, gradio, datasets, cua-computer\n", + "\n", + " ---------------------------------------- 0/70 [pytz]\n", + " ---------------------------------------- 0/70 [pytz]\n", + " ---------------------------------------- 0/70 [pytz]\n", + " --------------------------------------- 1/70 [pydub]\n", + " -- ------------------------------------- 4/70 [websockets]\n", + " -- ------------------------------------- 4/70 [websockets]\n", + " --- ------------------------------------ 6/70 [urllib3]\n", + " --- ------------------------------------ 6/70 [urllib3]\n", + " ---- ----------------------------------- 7/70 [tzdata]\n", + " ---- ----------------------------------- 7/70 [tzdata]\n", + " ---- ----------------------------------- 7/70 [tzdata]\n", + " ----- ---------------------------------- 9/70 [tqdm]\n", + " ----- ---------------------------------- 10/70 [tomlkit]\n", + " ------- -------------------------------- 13/70 [semantic-version]\n", + " -------- ------------------------------- 14/70 [ruff]\n", + " -------- ------------------------------- 14/70 [ruff]\n", + " --------- ------------------------------ 16/70 [python-multipart]\n", + " ---------- ----------------------------- 18/70 [pydantic-core]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", 
+ " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ---------- ----------------------------- 19/70 [pyarrow]\n", + " ------------ --------------------------- 21/70 [pillow]\n", + " ------------ --------------------------- 21/70 [pillow]\n", + " ------------ --------------------------- 21/70 [pillow]\n", + " ------------ --------------------------- 21/70 [pillow]\n", + " ------------ --------------------------- 21/70 [pillow]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- 
-------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " ------------- -------------------------- 23/70 [numpy]\n", + " -------------- ------------------------- 26/70 [markupsafe]\n", + " --------------- ------------------------ 27/70 [idna]\n", + " ----------------- ---------------------- 30/70 [fsspec]\n", + " ----------------- ---------------------- 30/70 [fsspec]\n", + " ----------------- ---------------------- 30/70 [fsspec]\n", + " ------------------- -------------------- 34/70 [distro]\n", + " -------------------- ------------------- 35/70 [dill]\n", + " -------------------- ------------------- 36/70 [click]\n", + " --------------------- ------------------ 37/70 [charset_normalizer]\n", + " ---------------------- ----------------- 40/70 [audioop-lts]\n", + " ------------------------ --------------- 43/70 [aiohappyeyeballs]\n", + " -------------------------- ------------- 46/70 [uvicorn]\n", + " -------------------------- ------------- 46/70 [uvicorn]\n", + " --------------------------- ------------ 48/70 [pydantic]\n", + " --------------------------- ------------ 48/70 [pydantic]\n", + " --------------------------- ------------ 48/70 [pydantic]\n", + " --------------------------- ------------ 48/70 [pydantic]\n", + " --------------------------- ------------ 48/70 [pydantic]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", 
+ " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " 
---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 49/70 [pandas]\n", + " ---------------------------- ----------- 50/70 [multiprocess]\n", + " ---------------------------- ----------- 50/70 [multiprocess]\n", + " ----------------------------- ---------- 51/70 [markdown-it-py]\n", + " ----------------------------- ---------- 52/70 [jinja2]\n", + " ----------------------------- ---------- 52/70 [jinja2]\n", + " ------------------------------ --------- 53/70 [httpcore]\n", + " ------------------------------ --------- 54/70 [anyio]\n", + " -------------------------------- ------- 56/70 [starlette]\n", + " -------------------------------- ------- 56/70 
[starlette]\n", + " -------------------------------- ------- 57/70 [rich]\n", + " -------------------------------- ------- 57/70 [rich]\n", + " -------------------------------- ------- 57/70 [rich]\n", + " --------------------------------- ------ 58/70 [posthog]\n", + " --------------------------------- ------ 59/70 [huggingface-hub]\n", + " --------------------------------- ------ 59/70 [huggingface-hub]\n", + " --------------------------------- ------ 59/70 [huggingface-hub]\n", + " --------------------------------- ------ 59/70 [huggingface-hub]\n", + " --------------------------------- ------ 59/70 [huggingface-hub]\n", + " ---------------------------------- ----- 60/70 [httpx]\n", + " ---------------------------------- ----- 61/70 [aiohttp]\n", + " ---------------------------------- ----- 61/70 [aiohttp]\n", + " ---------------------------------- ----- 61/70 [aiohttp]\n", + " ----------------------------------- ---- 62/70 [typer]\n", + " ------------------------------------ --- 64/70 [gradio-client]\n", + " ------------------------------------- -- 65/70 [fastapi]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " 
-------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 67/70 [gradio]\n", + " -------------------------------------- - 68/70 [datasets]\n", + " -------------------------------------- - 68/70 [datasets]\n", + " -------------------------------------- - 68/70 [datasets]\n", + " -------------------------------------- - 68/70 [datasets]\n", + " -------------------------------------- - 68/70 [datasets]\n", + " --------------------------------------- 69/70 [cua-computer]\n", + " --------------------------------------- 69/70 [cua-computer]\n", + " ---------------------------------------- 70/70 [cua-computer]\n", + "\n", + "Successfully installed aiofiles-24.1.0 aiohappyeyeballs-2.6.1 aiohttp-3.12.15 aiosignal-1.4.0 annotated-types-0.7.0 anyio-4.10.0 attrs-25.3.0 audioop-lts-0.2.2 backoff-2.2.1 brotli-1.1.0 certifi-2025.8.3 charset_normalizer-3.4.3 click-8.2.1 cua-computer-0.4.4 cua-core-0.1.9 datasets-4.0.0 dill-0.3.8 
distro-1.9.0 fastapi-0.116.1 ffmpy-0.6.1 filelock-3.19.1 frozenlist-1.7.0 fsspec-2025.3.0 gradio-5.43.1 gradio-client-1.12.1 groovy-0.1.2 h11-0.16.0 httpcore-1.0.9 httpx-0.28.1 huggingface-hub-0.34.4 idna-3.10 jinja2-3.1.6 markdown-it-py-4.0.0 markupsafe-3.0.2 mdurl-0.1.2 multidict-6.6.4 multiprocess-0.70.16 numpy-2.3.2 orjson-3.11.2 pandas-2.3.2 pillow-11.3.0 posthog-6.6.1 propcache-0.3.2 pyarrow-21.0.0 pydantic-2.11.7 pydantic-core-2.33.2 pydub-0.25.1 python-dotenv-1.1.1 python-multipart-0.0.20 pytz-2025.2 pyyaml-6.0.2 requests-2.32.5 rich-14.1.0 ruff-0.12.10 safehttpx-0.1.6 semantic-version-2.10.0 shellingham-1.5.4 sniffio-1.3.1 starlette-0.47.3 tomlkit-0.13.3 tqdm-4.67.1 typer-0.16.1 typing-inspection-0.4.1 tzdata-2025.2 urllib3-2.5.0 uvicorn-0.35.0 websocket-client-1.8.0 websockets-15.0.1 xxhash-3.5.0 yarl-1.20.1\n" + ] + } + ], "source": [ "!pip uninstall -y cua-computer\n", "!pip install \"cua-computer[all]\"" @@ -153,6 +614,8 @@ "metadata": {}, "outputs": [], "source": [ + "await computer.run() # Initialize the computer first\n", + "\n", "screenshot = await computer.interface.screenshot()\n", "\n", "with open(\"screenshot.png\", \"wb\") as f:\n", @@ -163,7 +626,43 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Option 2: Local VMs (Lume daemon)\n", + "## Option 2: KASM Local Docker Containers (cross-platform)\n", + "\n", + "### Pull from Docker Hub\n", + "```bash\n", + "docker pull --platform=linux/amd64 trycua/cua-ubuntu:latest\n", + "```\n", + "\n", + "### Or build locally\n", + "```bash\n", + "cd libs/kasm\n", + "docker build -t cua-ubuntu:latest .\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from computer import Computer\n", + "\n", + "computer = Computer(\n", + " os_type=\"linux\",\n", + " provider_type=\"docker\",\n", + " image=\"trycua/cua-ubuntu:latest\",\n", + " name=\"my-cua-container\"\n", + ")\n", + "\n", + "await computer.run() # Launch & connect to 
Docker container" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option 3: Local VMs (Lume daemon)\n", "\n", "For Apple Silicon Macs, you can run VMs locally using the Lume daemon." ] @@ -460,7 +959,8 @@ "dir_exists = await computer.interface.directory_exists(\"/path/to/directory\")\n", "\n", "# Run shell commands\n", - "stdout, stderr = await computer.interface.run_command(\"ls -la\")" + "result = await computer.interface.run_command(\"ls -la\")\n", + "stdout, stderr, returncode = result.stdout, result.stderr, result.returncode" ] }, { @@ -587,7 +1087,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv", + "display_name": "cua", "language": "python", "name": "python3" }, @@ -601,7 +1101,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" + "version": "3.13.5" } }, "nbformat": 4, diff --git a/notebooks/computer_server_nb.ipynb b/notebooks/computer_server_nb.ipynb index fee6e048..8a6867ba 100644 --- a/notebooks/computer_server_nb.ipynb +++ b/notebooks/computer_server_nb.ipynb @@ -49,36 +49,6 @@ "## Start the Computer server" ] }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "==> Starting computer-server on 0.0.0.0:8000...\n", - "Starting computer-server on 0.0.0.0:8000...\n", - "\u001b[32mINFO\u001b[0m: Started server process [\u001b[36m65480\u001b[0m]\n", - "\u001b[32mINFO\u001b[0m: Waiting for application startup.\n", - "\u001b[32mINFO\u001b[0m: Application startup complete.\n", - "\u001b[32mINFO\u001b[0m: Uvicorn running on \u001b[1mhttp://0.0.0.0:8000\u001b[0m (Press CTRL+C to quit)\n", - "^C\n", - "\u001b[32mINFO\u001b[0m: Shutting down\n", - "\u001b[32mINFO\u001b[0m: Waiting for application shutdown.\n", - "\u001b[32mINFO\u001b[0m: Application shutdown complete.\n", - "\u001b[32mINFO\u001b[0m: Finished server process [\u001b[36m65480\u001b[0m]\n" - ] - } - ], - 
"source": [ - "import os\n", - "# os.chdir('../../scripts')\n", - "\n", - "! ./run_computer_server.sh\n" - ] - }, { "cell_type": "code", "execution_count": null, @@ -109,7 +79,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" + "version": "3.13.5" } }, "nbformat": 4, diff --git a/notebooks/eval_osworld.ipynb b/notebooks/eval_osworld.ipynb index 7b00795a..be3ab2ac 100644 --- a/notebooks/eval_osworld.ipynb +++ b/notebooks/eval_osworld.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -34,7 +34,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -43,720 +43,83 @@ "# - ANTHROPIC_API_KEY (for Claude models)\n", "# - OPENAI_API_KEY (for OpenAI models)\n", "\n", - "from hud import gym, load_taskset\n", - "from pprint import pprint\n", - "import asyncio" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Import the HUD-integrated ComputerAgent\n", - "from agent.integrations.hud import ComputerAgent" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total tasks in OSWorld: 367\n", - "Task prompt: Can you make my computer bring back the last tab I shut down?\n" - ] - } - ], - "source": [ - "# Load OSWorld taskset\n", - "taskset = await load_taskset(\"OSWorld-Verified\")\n", - "print(f\"Total tasks in OSWorld: {len(taskset)}\")\n", - "\n", - "# Select a test task\n", - "test = taskset[148]\n", - "print(f\"Task prompt: {test.prompt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total tasks in SheetBench: 50\n", - "Task prompt: Given the Input data, determine the 
ticker with the greatest correlation between volume and next day price change.\n", - "- in ANSWER tab put the Ticker in A1 and the correlation in B1\n", - " - use CORREL to determine correlation\n", - "- be sure to first sort the date by ticker z to a and then date ascending before calculating nextdaypricechange %\n", - "Correlation should be rounded to 2 decimal points\n" - ] - } - ], - "source": [ - "# Load SheetBench taskset\n", - "taskset = await load_taskset(\"SheetBench-V2\")\n", - "print(f\"Total tasks in SheetBench: {len(taskset)}\")\n", - "\n", - "# Select a test task\n", - "test = taskset[0]\n", - "print(f\"Task prompt: {test.prompt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] 2025-08-08 19:08:17,078 | hud.environment | View the live trace at https://app.hud.so/trace/ca88c178-cf40-499b-8ad3-d5d60348d9fe\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Environment ready!\n" - ] - } - ], - "source": [ - "# Create environment (takes ~2.5 minutes to start)\n", - "env = await gym.make(test)\n", - "print(\"Environment ready!\")" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'\\n
\\n
\\n \\n
\\n
\\n '" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await env.stream() # vnc" + "from pprint import pprint" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Test with any supported CUA model\n", + "## Quick single-task smoke test on OSWorld-Verified\n", "\n", "The ComputerAgent integration can use Claude, OpenAI, UI-TARS, or composed models just like the original ComputerAgent:" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 1, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\dillo\\miniconda3\\envs\\cua\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Created agent: computeragent-computer-use-preview\n" + "\n", + "\u001b[90m╔═════════════════════════════════════════════════════════════════╗\u001b[0m\n", + "\u001b[90mβ•‘\u001b[0m πŸš€ See your agent live at: \u001b[90mβ•‘\u001b[0m\n", + "\u001b[90mβ•Ÿβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β•’\u001b[0m\n", + "\u001b[90mβ•‘\u001b[0m \u001b[1m\u001b[33mhttps://app.hud.so/trace/cbe6f71b-f520-4630-9f27-778647070327\u001b[0m \u001b[90mβ•‘\u001b[0m\n", + "\u001b[90mβ•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•\u001b[0m\n", + "\n", + "Running: Can you make my computer bring back the last tab I shut down?\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-08-27 16:17:53,047 
- agent.ComputerAgent - INFO - LLM processing started with 2 messages\n", + "2025-08-27 16:18:02,697 - agent.ComputerAgent - INFO - LLM processing started with 5 messages\n", + "2025-08-27 16:18:15,887 - agent.ComputerAgent - INFO - LLM processing started with 7 messages\n", + "2025-08-27 16:18:28,541 - agent.ComputerAgent - INFO - LLM processing started with 9 messages\n", + "2025-08-27 16:18:42,176 - agent.ComputerAgent - INFO - LLM processing started with 11 messages\n", + "2025-08-27 16:18:55,937 - agent.ComputerAgent - INFO - LLM processing started with 13 messages\n", + "2025-08-27 16:19:11,654 - agent.ComputerAgent - INFO - LLM processing started with 15 messages\n", + "2025-08-27 16:19:23,839 - agent.ComputerAgent - INFO - LLM processing started with 17 messages\n", + "2025-08-27 16:19:39,065 - agent.ComputerAgent - INFO - LLM processing started with 19 messages\n", + "Tool execution failed: Tool evaluate has an output schema but did not return structured content\n", + "Evaluation phase failed: [MCPToolResult(meta=None, content=[TextContent(type='text', text='Tool evaluate has an output schema but did not return structured content', annotations=None, meta=None)], structuredContent=None, isError=True)]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "βœ… Reward: 0.0\n", + "\n", + "\u001b[92mβœ“ Trace complete!\u001b[0m \u001b[2mView at:\u001b[0m \u001b[1m\u001b[33mhttps://app.hud.so/trace/cbe6f71b-f520-4630-9f27-778647070327\u001b[0m\n", + "\n" ] } ], "source": [ - "import logging\n", - "# Create ComputerAgent with Claude\n", - "claude_agent = ComputerAgent(\n", - " # model=\"anthropic/claude-3-5-sonnet-20241022\",\n", - " model=\"openai/computer-use-preview\",\n", - " # environment=\"linux\", # OSWorld typically uses Linux\n", - " environment=\"browser\", # SheetBench uses the browser\n", - " trajectory_dir=\"trajectories\",\n", - " verbosity=logging.INFO,\n", - ")\n", + "from agent.integrations.hud import run_single_task\n", 
"\n", - "print(f\"Created agent: {claude_agent.name}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Initial observation complete\n", - "========= Step 1 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:14:10,479 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "2025-08-08 19:14:18,867 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 55, 'y': 149})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [ClickAction(type='click', reasoning='Sorting dataset for analysis preparation', logs={'conversation_length': 3}, point=Point(x=77, y=174), button='left', pattern=None, hold_keys=None)]\n", - "========= Step 2 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:14:24,566 - agent.ComputerAgent - INFO - LLM processing started with 4 messages\n", - "2025-08-08 19:14:30,430 - agent.ComputerAgent - INFO - Computer: keypress({'keys': ['CTRL', 'A']})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [PressAction(type='press', reasoning='Sorting dataset for analysis preparation', logs={'conversation_length': 5}, keys=['ctrl', 'a'])]\n", - "========= Step 3 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:14:36,137 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "2025-08-08 19:14:42,483 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 73, 'y': 151})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [ClickAction(type='click', reasoning='Sorting dataset for analysis preparation', logs={'conversation_length': 7}, point=Point(x=102, y=176), button='left', pattern=None, 
hold_keys=None)]\n", - "========= Step 4 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:14:48,687 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "2025-08-08 19:14:59,516 - agent.ComputerAgent - INFO - Computer: keypress({'keys': ['CTRL', 'A']})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [PressAction(type='press', reasoning='Sorting dataset for analysis preparation', logs={'conversation_length': 9}, keys=['ctrl', 'a'])]\n", - "========= Step 5 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:15:05,229 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "2025-08-08 19:15:15,153 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 55, 'y': 147}, {'x': 319, 'y': 713}]})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [DragAction(type='drag', reasoning='Highlighting data for sorting preparation', logs={'conversation_length': 12}, path=[Point(x=77, y=172), Point(x=448, y=835)], pattern=None, hold_keys=None)]\n", - "========= Step 6 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:15:21,362 - agent.ComputerAgent - INFO - LLM processing started with 13 messages\n", - "2025-08-08 19:15:33,774 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 229, 'y': 41})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [ClickAction(type='click', reasoning='Opening sort options for data', logs={'conversation_length': 15}, point=Point(x=322, y=48), button='left', pattern=None, hold_keys=None)]\n", - "========= Step 7 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:15:39,973 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - 
"2025-08-08 19:15:52,928 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 430, 'y': 96})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [ClickAction(type='click', reasoning='Choosing \"Sort range\" for sorting', logs={'conversation_length': 18}, point=Point(x=604, y=112), button='left', pattern=None, hold_keys=None)]\n", - "========= Step 8 ==========\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-08 19:15:59,611 - agent.ComputerAgent - INFO - LLM processing started with 19 messages\n", - "2025-08-08 19:16:17,003 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 530, 'y': 172})\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Agent's action: [ClickAction(type='click', reasoning='Accessing advanced sorting options now', logs={'conversation_length': 21}, point=Point(x=745, y=201), button='left', pattern=None, hold_keys=None)]\n" - ] - } - ], - "source": [ - "# Initial observation\n", - "obs, _ = await env.reset()\n", - "print(\"Initial observation complete\")\n", - "\n", - "# Agent loop with Claude\n", - "for i in range(8):\n", - " print(f\"========= Step {i + 1} ==========\")\n", - " \n", - " try:\n", - " action, done = await claude_agent.predict(obs)\n", - " print(f\"Agent's action: {action}\")\n", - "\n", - " obs, reward, terminated, info = await env.step(action)\n", - "\n", - " if done or terminated:\n", - " print(f\"Task completed after {i + 1} steps\")\n", - " break\n", - " \n", - " except Exception as e:\n", - " print(f\"Error in step {i + 1}: {e}\")\n", - " break" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Evaluate Results" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Final Evaluation ===\n", - "{'error': None,\n", - " 'gold_file_url': 
'https://gahludmjcsmszgyufydt.supabase.co//storage/v1/object/public/sheetbench/615426c8-9df7-4ffa-92e9-200134a84da9/gold_solution_2.xlsx?',\n", - " 'logs': 'INFO: Starting evaluation with evaluator: sheets_cell_values\\n'\n", - " \"INFO: Evaluator args: [{'A1': 'ABC', 'B1': '-0.08'}]\\n\"\n", - " 'INFO: Partial rewarding: False\\n'\n", - " 'INFO: Starting sheets_cell_values evaluation for environment: '\n", - " 'af7a34a0-43b0-44d2-82d0-2b66ed16f1ea\\n'\n", - " \"INFO: Raw args received: [{'A1': 'ABC', 'B1': '-0.08'}] (type: \"\n", - " \")\\n\"\n", - " 'INFO: Partial rewarding enabled: False\\n'\n", - " 'INFO: === Google Sheets Cell Value Verification ===\\n'\n", - " 'INFO: Current page URL: '\n", - " 'https://docs.google.com/spreadsheets/d/1h-Ec3rW9sAME2sTn8qxIvFxO6qXtdURPacEFL5DJnqw/edit?gid=700326861#gid=700326861\\n'\n", - " 'INFO: βœ… Confirmed on Google Sheets page\\n'\n", - " 'INFO: Processing args parameter...\\n'\n", - " 'INFO: Args is a list with 1 items, extracting first item\\n'\n", - " \"INFO: Extracted: {'A1': 'ABC', 'B1': '-0.08'} (type: )\\n\"\n", - " 'INFO: Cell checks to perform: 2 cells\\n'\n", - " \"INFO: A1 -> expected: 'ABC'\\n\"\n", - " \"INFO: B1 -> expected: '-0.08'\\n\"\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " \"sheets_cell_values: Checking cells: {'A1': 'ABC', 'B1': '-0.08'}\\n\"\n", - " 'INFO: === ANSWER Sheet Navigation ===\\n'\n", - " 'INFO: Attempt 1/3: Attempting to find and navigate to ANSWER sheet '\n", - " 'tab...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Attempt 1/3: Attempting to navigate to ANSWER '\n", - " 'sheet\\n'\n", - " 'INFO: Searching for ANSWER tab with selector: '\n", - " 'span.docs-sheet-tab-name:has-text(\"ANSWER\")\\n'\n", - " 'INFO: ANSWER tab search result (attempt 1): Found\\n'\n", - " 'INFO: βœ… Found ANSWER sheet tab on attempt 1, clicking on it...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 
'sheets_cell_values: Found ANSWER sheet tab on attempt 1, clicking on '\n", - " 'it\\n'\n", - " 'ERROR: ❌ Error navigating to ANSWER sheet on attempt 1: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'WARNING: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Error navigating to ANSWER sheet on attempt 1: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'INFO: Waiting 500ms before retry 2...\\n'\n", - " 'INFO: Attempt 2/3: Attempting to find and navigate to ANSWER sheet '\n", - " 'tab...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Attempt 2/3: Attempting to navigate to ANSWER '\n", - " 'sheet\\n'\n", - " 'INFO: Searching for ANSWER tab with selector: '\n", - " 'span.docs-sheet-tab-name:has-text(\"ANSWER\")\\n'\n", - " 'INFO: ANSWER tab search result (attempt 2): Found\\n'\n", - " 'INFO: βœ… Found ANSWER sheet tab on attempt 2, clicking on it...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Found ANSWER sheet tab on attempt 2, clicking on '\n", - " 'it\\n'\n", - " 'ERROR: ❌ Error navigating to ANSWER sheet on attempt 2: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'WARNING: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Error navigating to ANSWER sheet on attempt 2: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'INFO: Waiting 500ms before retry 3...\\n'\n", - " 'INFO: Attempt 3/3: Attempting to find and navigate to ANSWER sheet '\n", - " 'tab...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Attempt 3/3: Attempting to navigate to ANSWER '\n", - " 'sheet\\n'\n", - " 'INFO: Searching for ANSWER tab with selector: '\n", - " 'span.docs-sheet-tab-name:has-text(\"ANSWER\")\\n'\n", - " 'INFO: ANSWER tab search result (attempt 3): Found\\n'\n", - " 'INFO: βœ… Found ANSWER sheet tab on attempt 3, clicking on it...\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Found ANSWER sheet tab on attempt 3, clicking on '\n", - " 'it\\n'\n", - " 'ERROR: ❌ Error navigating to ANSWER sheet on attempt 3: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'WARNING: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Error navigating to ANSWER sheet on attempt 3: '\n", - " 'Locator.click: Timeout 30000ms exceeded.\\n'\n", - " 'Call log:\\n'\n", - " ' - waiting for '\n", - " 'locator(\"span.docs-sheet-tab-name:has-text(\\\\\"ANSWER\\\\\")\")\\n'\n", - " ' - - locator resolved to ANSWER\\n'\n", - " ' - - attempting click action\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 20ms\\n'\n", - " ' - 2 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 100ms\\n'\n", - " ' - 35 Γ— waiting for element to be visible, enabled and stable\\n'\n", - " ' - - element is visible, enabled and stable\\n'\n", - " ' - - scrolling into view if needed\\n'\n", - " ' - - done scrolling\\n'\n", - " ' - -
'\n", - " 'intercepts pointer events\\n'\n", - " ' - - retrying click action\\n'\n", - " ' - - waiting 500ms\\n'\n", - " '\\n'\n", - " 'WARNING: ⚠️ Failed to navigate to ANSWER sheet after 3 attempts, '\n", - " 'proceeding with current sheet\\n'\n", - " 'WARNING: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Failed to navigate to ANSWER sheet after 3 '\n", - " 'attempts, proceeding with current sheet\\n'\n", - " 'INFO: === File Content Extraction ===\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Granted read-write permissions\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Extracting page contents\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Selecting content\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Successfully extracted 157940 characters from '\n", - " 'file\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Found 5003 rows in content\\n'\n", - " 'INFO: Content extracted: 157940 characters\\n'\n", - " 'INFO: === Cell Content Parsing ===\\n'\n", - " 'INFO: Split file content into 5003 rows\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Found 5003 rows in content\\n'\n", - " 'INFO: First few rows of content:\\n'\n", - " \"INFO: Row 1: 'TradeDate | Ticker | ClosePrice | Volume | | '\\n\"\n", - " \"INFO: Row 2: '2023-01-02 | ABC | 476.87 | 2225355 | | '\\n\"\n", - " \"INFO: Row 3: '2023-01-02 | DEF | 322.21 | 3778582 | | '\\n\"\n", - " 'INFO: ... 
and 5000 more rows\\n'\n", - " 'INFO: === Cell Reference Parsing ===\\n'\n", - " \"INFO: Processing cell reference: 'A1' -> expected: 'ABC'\\n\"\n", - " \"INFO: Parsed 'A1' -> row=1 (0-indexed: 0), col=A (0-indexed: 0)\\n\"\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Parsed cell A1 as row=0, col=0\\n'\n", - " 'INFO: Row 1 exists in content\\n'\n", - " \"INFO: Row 1 has 6 columns: ['Col1', 'Col2', 'Col3', 'Col4', \"\n", - " \"'Col5', 'Col6']\\n\"\n", - " \"INFO: βœ… Found value for A1: 'TradeDate'\\n\"\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " \"sheets_cell_values: Found value for A1: 'TradeDate'\\n\"\n", - " \"INFO: Processing cell reference: 'B1' -> expected: '-0.08'\\n\"\n", - " \"INFO: Parsed 'B1' -> row=1 (0-indexed: 0), col=B (0-indexed: 1)\\n\"\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Parsed cell B1 as row=0, col=1\\n'\n", - " 'INFO: Row 1 exists in content\\n'\n", - " \"INFO: Row 1 has 6 columns: ['Col1', 'Col2', 'Col3', 'Col4', \"\n", - " \"'Col5', 'Col6']\\n\"\n", - " \"INFO: βœ… Found value for B1: 'Ticker'\\n\"\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " \"sheets_cell_values: Found value for B1: 'Ticker'\\n\"\n", - " 'INFO: === Cell Value Comparison ===\\n'\n", - " 'INFO: Comparing cell A1:\\n'\n", - " \"INFO: Expected: 'ABC' (type: )\\n\"\n", - " \"INFO: Actual: 'TradeDate' (type: )\\n\"\n", - " \"INFO: ❌ VALUE MISMATCH: 'TradeDate' != 'ABC'\\n\"\n", - " 'INFO: Comparing cell B1:\\n'\n", - " \"INFO: Expected: '-0.08' (type: )\\n\"\n", - " \"INFO: Actual: 'Ticker' (type: )\\n\"\n", - " \"INFO: ❌ VALUE MISMATCH: 'Ticker' != '-0.08'\\n\"\n", - " 'INFO: === Final Results ===\\n'\n", - " 'INFO: Cell comparison summary:\\n'\n", - " 'INFO: Total cells checked: 2\\n'\n", - " 'INFO: Matches: 0\\n'\n", - " 'INFO: Mismatches: 2\\n'\n", - " \"INFO: Failed cells: ['A1:', 'B1:']\\n\"\n", - " 'INFO: ❌ NOT all cells match 
expected values\\n'\n", - " 'INFO: Mismatches: [\"Cell A1: expected \\'ABC\\', got \\'TradeDate\\'\", '\n", - " '\"Cell B1: expected \\'-0.08\\', got \\'Ticker\\'\"]\\n'\n", - " 'INFO: [TASK af7a34a0-43b0-44d2-82d0-2b66ed16f1ea] '\n", - " 'sheets_cell_values: Mismatches found: [\"Cell A1: expected \\'ABC\\', '\n", - " 'got \\'TradeDate\\'\", \"Cell B1: expected \\'-0.08\\', got \\'Ticker\\'\"]\\n'\n", - " 'INFO: Final reward: 0.0\\n'\n", - " 'INFO: === Sheets Cell Values Evaluation Complete ===\\n'\n", - " 'INFO: Evaluation completed. Final reward: 0.0\\n',\n", - " 'reward': 0.0}\n" - ] - } - ], - "source": [ - "# Evaluate environment state\n", - "result = await env.evaluate()\n", - "print(\"=== Final Evaluation ===\")\n", - "pprint(result)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Environment closed\n" - ] - } - ], - "source": [ - "# Clean up\n", - "await env.close()\n", - "print(\"Environment closed\")" + "# Quick single-task smoke test on OSWorld-Verified-XLang\n", + "# You can swap \"hud-evals/OSWorld-Verified-XLang\" -> \"hud-evals/SheetBench-V2\" to test SheetBench.\n", + "await run_single_task(\n", + " dataset=\"hud-evals/OSWorld-Verified-XLang\",\n", + " model=\"openai/computer-use-preview+openai/gpt-5-nano\", # or any supported model string\n", + " task_id=155 # open last tab task (easy)\n", + ")" ] }, { @@ -768,109261 +131,4834 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import uuid\n", + "from agent.integrations.hud import run_full_dataset\n", + "\n", + "# Full dataset evaluation (runs via HUD's run_dataset under the hood)\n", + "job_name = f\"osworld-test-{str(uuid.uuid4())[:4]}\"\n", + "\n", + "results = await run_full_dataset(\n", + " dataset=\"hud-evals/OSWorld-Verified-XLang\", # You can also pass a Dataset or a list[dict]\n", + " 
job_name=job_name, # Optional; defaults to a timestamp for custom datasets\n", + " model=\"openai/computer-use-preview\", # Or any supported model string\n", + " max_concurrent=20, # Tune to your infra\n", + " max_steps=50, # Safety cap per task\n", + " split=\"train[:3]\" # Limit to just 3 tasks\n", + ")\n", + "\n", + "# results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", + "print(f\"Job: {job_name}\")\n", + "print(f\"Total results: {len(results)}\")\n", + "pprint(results[:3]) # preview" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Benchmark Composed Agents" + ] + }, + { + "cell_type": "code", + "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v2/tasksets/OSWorld-Verified/tasks \"HTTP/1.1 200 OK\"\n", - "INFO:venv:Taskset OSWorld-Verified loaded successfully\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/jobs \"HTTP/1.1 200 OK\"\n", - " 0%|----------------------------------------| 0/7340 [0:12 output.csv\\nlibreoffice --calc output.csv\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'cd ~/Desktop\\nlibreoffice --headless --convert-to csv file1.xlsx\\nlibreoffice --headless --convert-to csv file2.ods\\ncat file1.csv file2.csv > output.csv\\nlibreoffice --calc output.csv\\n'})\n", - "2025-08-11 15:32:49,711 - agent.ComputerAgent - INFO - Computer: click({'x': 694, 'y': 248})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 694, 'y': 248})\n", - " 2%|----------------------------------------| 155/7340 [6:28<300:27, 23.9 steps/min]2025-08-11 15:32:50,329 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:32:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:32:51,007 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:32:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 157/7340 [6:30<297:33, 24.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:32:51,672 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:32:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:32:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:32:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 157/7340 [6:32<299:06, 24.0 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:32:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:32:54,187 - agent.ComputerAgent - INFO - Computer: click({'x': 463, 'y': 136})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 463, 'y': 136})\n", - " 2%|----------------------------------------| 157/7340 [6:33<299:58, 23.9 steps/min]\u001b[92m15:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:32:54,845 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 103, 'y': 380})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 103, 'y': 380})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/edaeedb6-9993-4b6f-b226-19e2768a5736/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 158/7340 [6:34<298:46, 24.0 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:32:55,978 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:32:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 159/7340 [6:35<297:36, 24.1 steps/min]2025-08-11 15:32:56,643 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:32:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:32:57,305 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m15:32:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 159/7340 [6:36<298:28, 24.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 159/7340 [6:37<299:14, 24.0 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0b1cfd32-0cbc-48e7-890d-9ec0ac043035/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:32:59,978 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:33:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 159/7340 [6:39<300:32, 23.9 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:01,288 - 
agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 159/7340 [6:41<302:27, 23.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:03,240 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider 
= huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:03,930 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 390, 'y': 345})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 390, 'y': 345})\n", - " 2%|----------------------------------------| 159/7340 [6:43<303:26, 23.7 steps/min]\u001b[92m15:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:04,616 - agent.ComputerAgent - INFO - Computer: click({'x': 101, 'y': 295})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 101, 'y': 295})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:06,000 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "2025-08-11 15:33:06,671 - agent.ComputerAgent - INFO - Computer: click({'x': 219, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 219, 'y': 53})\n", - " 2%|----------------------------------------| 160/7340 [6:45<303:33, 23.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:08,650 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:33:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:33:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 162/7340 [6:48<301:44, 23.8 steps/min]\u001b[92m15:33:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:09,987 - agent.ComputerAgent - INFO - Computer: click({'x': 812, 'y': 189})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 812, 'y': 189})\n", - "\u001b[92m15:33:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:11,307 - agent.ComputerAgent - INFO - Computer: click({'x': 102, 'y': 238})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 102, 'y': 238})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:12,659 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 162/7340 [6:52<304:40, 23.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:14,031 - agent.ComputerAgent - INFO - Computer: click({'x': 309, 'y': 116})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 309, 'y': 116})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - 
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:15,326 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "2025-08-11 15:33:15,962 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:33:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 164/7340 [6:55<303:17, 23.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:17,260 - agent.ComputerAgent - INFO - Computer: click({'x': 652, 'y': 139})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 652, 'y': 139})\n", - "2025-08-11 15:33:17,929 - agent.ComputerAgent - INFO - Computer: 
click({'x': 212, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 212, 'y': 53})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:33:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:20,602 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+alt+t'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+alt+t'})\n", - " 2%|----------------------------------------| 166/7340 [6:59<302:22, 23.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:21,282 - agent.ComputerAgent - INFO - Computer: click({'x': 371, 'y': 624})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 371, 'y': 624})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:22,607 - agent.ComputerAgent - INFO - 
Computer: type({'text': '100'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '100'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d197f4f-b7b0-4196-9681-135d7bc3a45b/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e9d83ed4-d6d0-46f7-982b-98433769e30b/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:23,270 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - " 2%|----------------------------------------| 168/7340 [7:02<300:36, 23.9 steps/min]\u001b[92m15:33:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:33:23,919 - agent.ComputerAgent - INFO - Computer: click({'x': 414, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 414, 'y': 75})\n", - "2025-08-11 15:33:24,594 - agent.ComputerAgent - INFO - Computer: double_click({'x': 473, 'y': 93})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 473, 'y': 93})\n", - " 2%|----------------------------------------| 170/7340 [7:03<297:55, 24.1 steps/min]2025-08-11 15:33:25,220 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 
messages\n", - "\u001b[92m15:33:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ae9871c0-5cb9-4c5b-9c02-c899819f9f81/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:25,919 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:33:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 172/7340 [7:05<295:18, 24.3 steps/min]2025-08-11 15:33:26,562 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m15:33:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:27,860 - agent.ComputerAgent - INFO - Computer: type({'text': 'edited_colorful.png'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'edited_colorful.png'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e1e61614-8290-4d90-9feb-594d2a7199e8/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 172/7340 [7:07<297:07, 24.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:33:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:29,689 - agent.ComputerAgent - INFO - Computer: click({'x': 693, 'y': 130})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 693, 'y': 130})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 173/7340 [7:08<296:07, 24.2 steps/min]2025-08-11 15:33:30,343 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m15:33:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfefeec4-603f-4657-b0fe-7a641734693c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/982f8f16-b578-409f-8388-d8d5ee68ccee/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:31,382 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - 
"\u001b[92m15:33:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2d349f43-6c63-4144-9bd3-bbd16183b16d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/69393c41-bcaa-4752-9a82-e3b105fae459/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 174/7340 [7:10<295:35, 24.2 steps/min]2025-08-11 15:33:32,020 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m15:33:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:32,699 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:33:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 174/7340 [7:11<296:28, 24.2 steps/min]2025-08-11 15:33:33,362 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:33:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/reset \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:34,058 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m15:33:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5a854981-aa94-433f-9381-2964f1117035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/edaeedb6-9993-4b6f-b226-19e2768a5736/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 174/7340 [7:13<297:25, 24.1 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0b1cfd32-0cbc-48e7-890d-9ec0ac043035/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:34,700 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:33:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:33:35,380 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:33:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - " 2%|----------------------------------------| 174/7340 [7:14<298:20, 24.0 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:36,059 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:33:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/49f1eefe-9bc4-430c-a6c8-83675960a057/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n", - " 2%|----------------------------------------| 174/7340 [7:16<299:43, 23.9 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:38,043 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for 
open-end generation.\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m15:33:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:39,360 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 285})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 285})\n", - "\u001b[92m15:33:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 174/7340 [7:18<301:01, 23.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:40,000 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 
messages\n", - "\u001b[92m15:33:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:40,660 - agent.ComputerAgent - INFO - Computer: click({'x': 20, 'y': 139})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 20, 'y': 139})\n", - "\u001b[92m15:33:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:42,006 - agent.ComputerAgent - INFO - Computer: type({'text': 'clear cookies on exit'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'clear cookies on exit'})\n", - " 2%|----------------------------------------| 175/7340 [7:21<301:04, 23.8 steps/min]2025-08-11 15:33:42,645 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:33:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:33:43,335 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 46, 'y': 166}, {'x': 386, 'y': 356}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 46, 'y': 166}, {'x': 386, 'y': 356}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"\u001b[92m15:33:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 177/7340 [7:23<298:59, 24.0 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:33:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:33:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:45,327 - agent.ComputerAgent - INFO - Computer: double_click({'x': 244, 'y': 155})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 244, 'y': 155})\n", - "\u001b[92m15:33:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:46,591 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 15:33:46,592 - agent.ComputerAgent - INFO - Agent: Opening the desktop and launching GIMP to convert the image to 
SVG.\n", - "INFO:agent.ComputerAgent:Agent: Opening the desktop and launching GIMP to convert the image to SVG.\n", - "2025-08-11 15:33:46,593 - agent.ComputerAgent - INFO - Computer: screenshot({})\n", - "INFO:agent.ComputerAgent:Computer: screenshot({})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 2%|----------------------------------------| 178/7340 [7:26<299:25, 23.9 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:47,979 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -525, 'scroll_x': 0, 'x': 126, 'y': 419})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -525, 'scroll_x': 0, 'x': 126, 'y': 419})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:48,644 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:33:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 2%|----------------------------------------| 180/7340 [7:27<296:56, 24.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:49,323 - agent.ComputerAgent - INFO - Computer: click({'x': 249, 'y': 81})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 249, 'y': 81})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 
OK\"\n", - "\u001b[92m15:33:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:51,332 - agent.ComputerAgent - INFO - Computer: type({'text': 'focus editor'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'focus editor'})\n", - " 2%|----------------------------------------| 181/7340 [7:30<296:59, 24.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:33:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:33:52,512 - agent.ComputerAgent - INFO - Computer: click({'x': 416, 'y': 74})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 416, 'y': 74})\n", - " 2%|----------------------------------------| 183/7340 [7:31<294:26, 24.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 15:33:53,159 - agent.ComputerAgent - INFO - LLM processing started with 7 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 7 messages\n", - "\u001b[92m15:33:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3d9da005-d40d-4335-86ec-275c2ec5665b/invoke 
\"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:33:53,831 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:33:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:33:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 184/7340 [7:33<294:07, 24.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:33:55,575 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m15:33:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:33:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d197f4f-b7b0-4196-9681-135d7bc3a45b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/69393c41-bcaa-4752-9a82-e3b105fae459/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 184/7340 [7:34<294:47, 24.3 steps/min]2025-08-11 15:33:56,223 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 336, 'y': 493})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 336, 'y': 493})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/982f8f16-b578-409f-8388-d8d5ee68ccee/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:56,861 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:33:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfefeec4-603f-4657-b0fe-7a641734693c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e9d83ed4-d6d0-46f7-982b-98433769e30b/invoke \"HTTP/1.1 200 OK\"\n", - "ERROR:asyncio:Unclosed client session\n", - "client_session: \n", - " 3%|β–ˆ---------------------------------------| 184/7340 [7:36<295:51, 24.2 steps/min]2025-08-11 15:33:58,012 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m15:33:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/49f1eefe-9bc4-430c-a6c8-83675960a057/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:33:58,652 - 
agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m15:33:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 185/7340 [7:37<295:08, 24.2 steps/min]2025-08-11 15:33:59,334 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:33:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:33:59,993 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:34:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 185/7340 [7:39<296:01, 24.2 steps/min]2025-08-11 15:34:01,015 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:34:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 185/7340 [7:40<297:07, 24.1 steps/min]\u001b[92m15:34:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:02,373 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:34:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:34:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:03,423 - agent.ComputerAgent - INFO - Computer: click({'x': 692, 'y': 624})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 692, 'y': 624})\n", - " 3%|β–ˆ---------------------------------------| 185/7340 [7:42<298:12, 24.0 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 186/7340 [7:44<297:45, 24.0 steps/min]\u001b[92m15:34:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5a854981-aa94-433f-9381-2964f1117035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:34:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:07,220 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'meta'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'meta'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:08,609 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 550, 'y': 627})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 550, 'y': 627})\n", - "\u001b[92m15:34:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 186/7340 [7:49<300:46, 23.8 steps/min]\u001b[92m15:34:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:10,587 - agent.ComputerAgent - INFO - Computer: click({'x': 515, 'y': 457})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 515, 'y': 457})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:11,253 - 
agent.ComputerAgent - INFO - Computer: click({'x': 905, 'y': 50})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 905, 'y': 50})\n", - "\u001b[92m15:34:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:11,905 - agent.ComputerAgent - INFO - Computer: click({'x': 476, 'y': 169})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 476, 'y': 169})\n", - " 3%|β–ˆ---------------------------------------| 188/7340 [7:51<298:42, 23.9 steps/min]2025-08-11 15:34:12,560 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_x': 0, 'scroll_y': -659, 'x': 18, 'y': 13})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_x': 0, 'scroll_y': -659, 'x': 18, 'y': 13})\n", - "\u001b[92m15:34:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:13,202 - agent.ComputerAgent - INFO - Computer: click({'x': 19, 'y': 44})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 19, 'y': 44})\n", - " 3%|β–ˆ---------------------------------------| 191/7340 [7:52<294:43, 24.3 steps/min]2025-08-11 15:34:13,860 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:34:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 502 Bad Gateway\"\n", - "2025-08-11 15:34:14,514 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m15:34:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 193/7340 [7:53<292:23, 24.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 193/7340 [7:55<293:37, 24.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:34:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:17,714 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -575, 'scroll_x': 0, 'x': 90, 'y': 194})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -575, 'scroll_x': 0, 'x': 90, 'y': 194})\n", - " 3%|β–ˆ---------------------------------------| 193/7340 [7:56<294:20, 24.3 steps/min]\u001b[92m15:34:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:18,367 - agent.ComputerAgent - INFO - Computer: click({'x': 120, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 120, 'y': 53})\n", - "\u001b[92m15:34:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/982f8f16-b578-409f-8388-d8d5ee68ccee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f1593044-fc61-4fc8-b29d-87e37914d5c2/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:34:19,040 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 430})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 430})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/0b1cfd32-0cbc-48e7-890d-9ec0ac043035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 194/7340 [7:58<293:36, 24.3 steps/min]2025-08-11 15:34:19,683 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m15:34:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:20,361 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m15:34:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2d349f43-6c63-4144-9bd3-bbd16183b16d/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 196/7340 [7:59<291:21, 24.5 steps/min]2025-08-11 15:34:21,003 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m15:34:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/edaeedb6-9993-4b6f-b226-19e2768a5736/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:34:21,660 - agent.ComputerAgent - INFO - LLM processing started 
with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:34:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 196/7340 [8:01<292:33, 24.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:23,387 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:34:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:34:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 196/7340 [8:02<293:16, 24.4 steps/min]2025-08-11 15:34:24,065 - agent.ComputerAgent - INFO - Computer: click({'x': 414, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 414, 'y': 75})\n", - "2025-08-11 15:34:24,731 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:34:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:25,787 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:34:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d197f4f-b7b0-4196-9681-135d7bc3a45b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 197/7340 [8:06<294:10, 24.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:28,860 - agent.ComputerAgent - INFO - Computer: type({'text': ' active editor group'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': ' active editor group'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfefeec4-603f-4657-b0fe-7a641734693c/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 197/7340 [8:08<294:56, 24.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:29,500 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:34:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:30,194 - agent.ComputerAgent - INFO - Computer: click({'x': 625, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 625, 'y': 427})\n", - "2025-08-11 15:34:30,876 - agent.ComputerAgent - INFO - Computer: click({'x': 904, 'y': 558})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 904, 'y': 558})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 198/7340 [8:11<295:24, 24.2 steps/min]\u001b[92m15:34:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:32,811 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m15:34:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:34,147 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "\u001b[92m15:34:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 200/7340 [8:13<293:33, 24.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:34,769 - agent.ComputerAgent - INFO - Computer: 
click({'x': 183, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 183, 'y': 53})\n", - "2025-08-11 15:34:35,412 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 15:34:35,413 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 428})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 428})\n", - "2025-08-11 15:34:36,077 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:34:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 200/7340 [8:16<295:30, 24.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:38,113 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 
messages\n", - "\u001b[92m15:34:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:34:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/e1e61614-8290-4d90-9feb-594d2a7199e8/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:40,185 - agent.ComputerAgent - INFO - LLM processing started with 9 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 9 messages\n", - "\u001b[92m15:34:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:40,912 - agent.ComputerAgent - INFO - Computer: move({'x': 230, 'y': 128})\n", - "INFO:agent.ComputerAgent:Computer: move({'x': 230, 'y': 128})\n", - "\u001b[92m15:34:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 202/7340 [8:20<294:32, 24.2 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:41,633 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 361, 'y': 549})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 361, 'y': 549})\n", - "\u001b[92m15:34:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:42,290 - agent.ComputerAgent - INFO - Computer: click({'x': 93, 'y': 184})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 93, 'y': 184})\n", - "\u001b[92m15:34:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 203/7340 [8:21<293:50, 24.3 steps/min]2025-08-11 15:34:42,946 - agent.ComputerAgent - INFO - Computer: click({'x': 17, 'y': 382})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 17, 'y': 382})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/49f1eefe-9bc4-430c-a6c8-83675960a057/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:34:43,638 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM 
processing started with 26 messages\n", - "\u001b[92m15:34:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 205/7340 [8:23<292:06, 24.4 steps/min]\u001b[92m15:34:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:44,985 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:34:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:34:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:45,658 - agent.ComputerAgent - INFO - Computer: click({'x': 332, 'y': 92})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 332, 'y': 92})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3d9da005-d40d-4335-86ec-275c2ec5665b/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 206/7340 [8:24<291:23, 24.5 steps/min]2025-08-11 15:34:46,281 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - 
"\u001b[92m15:34:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e9d83ed4-d6d0-46f7-982b-98433769e30b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:47,314 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:34:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e1e61614-8290-4d90-9feb-594d2a7199e8/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 207/7340 [8:26<290:55, 24.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/982f8f16-b578-409f-8388-d8d5ee68ccee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ae9871c0-5cb9-4c5b-9c02-c899819f9f81/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:34:47,977 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m15:34:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 
OK\"\n", - "2025-08-11 15:34:48,632 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m15:34:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 207/7340 [8:28<292:01, 24.4 steps/min]2025-08-11 15:34:49,913 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m15:34:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:34:50,584 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:34:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:51,263 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 
messages\n", - "\u001b[92m15:34:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 207/7340 [8:31<293:58, 24.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:34:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:53,944 - agent.ComputerAgent - 
INFO - Computer: click({'button': 'left', 'x': 148, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 148, 'y': 105})\n", - "\u001b[92m15:34:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0b1cfd32-0cbc-48e7-890d-9ec0ac043035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 207/7340 [8:33<295:06, 24.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:55,256 - agent.ComputerAgent - INFO - Computer: click({'x': 18, 'y': 477})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 18, 'y': 477})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:34:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:34:57,289 - agent.ComputerAgent - INFO - Computer: type({'text': '100'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '100'})\n", - "2025-08-11 15:34:57,983 - agent.ComputerAgent - INFO - Computer: click({'x': 462, 'y': 133})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 462, 'y': 133})\n", - "\u001b[92m15:34:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:34:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 208/7340 [8:37<295:33, 24.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:34:58,660 - agent.ComputerAgent - INFO - Computer: click({'x': 308, 'y': 116})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 308, 'y': 116})\n", - "2025-08-11 15:34:59,285 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 15:34:59,286 - agent.ComputerAgent - INFO - Computer: click({'x': 387, 'y': 158})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 387, 'y': 158})\n", - "\u001b[92m15:34:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:35:00,674 - 
agent.ComputerAgent - INFO - Computer: click({'x': 640, 'y': 436, 'button': 'left'})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 640, 'y': 436, 'button': 'left'})\n", - " 3%|β–ˆ---------------------------------------| 211/7340 [8:39<292:45, 24.4 steps/min]2025-08-11 15:35:01,337 - agent.ComputerAgent - INFO - Computer: click({'x': 420, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 420, 'y': 101})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:35:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 214/7340 [8:41<289:39, 24.6 steps/min]\u001b[92m15:35:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:03,280 - agent.ComputerAgent - INFO - Computer: double_click({'x': 213, 'y': 117})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 213, 'y': 117})\n", - "\u001b[92m15:35:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", 
- "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:03,948 - agent.ComputerAgent - INFO - Computer: click({'x': 416, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 416, 'y': 75})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 215/7340 [8:43<289:17, 24.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:05,212 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:35:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:35:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:05,863 - agent.ComputerAgent - INFO - Computer: click({'x': 610, 'y': 60})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 610, 'y': 60})\n", - " 3%|β–ˆ---------------------------------------| 217/7340 [8:45<287:15, 24.8 steps/min]2025-08-11 15:35:06,527 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m15:35:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:07,204 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:35:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 218/7340 [8:47<287:00, 24.8 steps/min]\u001b[92m15:35:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d197f4f-b7b0-4196-9681-135d7bc3a45b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:09,599 - agent.ComputerAgent - INFO - Computer: click({'x': 385, 'y': 35})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 385, 'y': 35})\n", - "INFO:httpx:HTTP Request: 
POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/69393c41-bcaa-4752-9a82-e3b105fae459/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 218/7340 [8:49<288:18, 24.7 steps/min]2025-08-11 15:35:10,889 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 123})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 123})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:35:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/edaeedb6-9993-4b6f-b226-19e2768a5736/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5a854981-aa94-433f-9381-2964f1117035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e1e61614-8290-4d90-9feb-594d2a7199e8/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e9d83ed4-d6d0-46f7-982b-98433769e30b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2d349f43-6c63-4144-9bd3-bbd16183b16d/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 219/7340 [8:50<287:40, 24.8 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:12,208 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -496, 'scroll_x': 0, 'x': 90, 'y': 219})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -496, 'scroll_x': 0, 'x': 90, 'y': 219})\n", - "\u001b[92m15:35:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:12,847 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:35:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:13,519 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 141})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 141})\n", - "2025-08-11 15:35:14,161 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:35:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:14,807 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m15:35:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 220/7340 [8:54<288:03, 24.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:35:15,833 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m15:35:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [8:55<285:57, 24.9 steps/min]2025-08-11 15:35:16,495 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m15:35:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:17,131 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m15:35:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [8:56<286:37, 24.8 steps/min]2025-08-11 15:35:17,814 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:35:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:18,873 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:35:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:19,539 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:35:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [8:58<287:54, 24.7 steps/min]2025-08-11 15:35:20,204 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:35:20 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:20,855 - agent.ComputerAgent - INFO - LLM processing started with 11 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 11 messages\n", - "\u001b[92m15:35:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [9:00<288:36, 24.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ae9871c0-5cb9-4c5b-9c02-c899819f9f81/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:35:22,016 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m15:35:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f1593044-fc61-4fc8-b29d-87e37914d5c2/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [9:01<289:13, 24.6 steps/min]2025-08-11 15:35:22,704 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:35:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3d9da005-d40d-4335-86ec-275c2ec5665b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 
15:35:23,375 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:35:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [9:02<289:57, 24.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfefeec4-603f-4657-b0fe-7a641734693c/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [9:03<290:29, 24.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:35:25,703 - agent.ComputerAgent - INFO - Computer: type({'text': 'delete browsing data on exit'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'delete browsing data on exit'})\n", - " 3%|β–ˆ---------------------------------------| 222/7340 [9:04<291:11, 24.4 steps/min]2025-08-11 15:35:26,361 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:35:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 223/7340 [9:07<291:14, 24.4 steps/min]\u001b[92m15:35:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:35:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:29,669 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 585, 'y': 449})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 585, 'y': 449})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 3%|β–ˆ---------------------------------------| 223/7340 [9:09<292:18, 24.3 steps/min]\u001b[92m15:35:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:30,981 - agent.ComputerAgent - INFO - Computer: click({'x': 586, 'y': 134})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 586, 'y': 134})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:32,967 
- agent.ComputerAgent - INFO - Computer: double_click({'x': 244, 'y': 154})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 244, 'y': 154})\n", - "\u001b[92m15:35:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:34,310 - agent.ComputerAgent - INFO - Agent: I enhanced the color vibrancy of your photo and exported it as edited_colorful.png to your Desktop.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: I enhanced the color vibrancy of your photo and exported it as edited_colorful.png to your Desktop.\n", - "\n", - "Task completed\n", - "2025-08-11 15:35:34,935 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 288\n", - " - prompt_tokens: 10800\n", - " - total_tokens: 11088\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 256\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0164\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 288\n", - " - prompt_tokens: 10800\n", - " - total_tokens: 11088\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 256\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0164\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/69393c41-bcaa-4752-9a82-e3b105fae459/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 225/7340 [9:14<292:25, 24.3 steps/min]2025-08-11 15:35:36,296 - agent.ComputerAgent - INFO - Computer: click({'x': 332, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 332, 'y': 105})\n", - "2025-08-11 15:35:36,947 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 650, 'x': 261, 'y': 230})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 650, 'x': 261, 'y': 230})\n", - "\u001b[92m15:35:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:35:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:38,290 - agent.ComputerAgent - INFO - Computer: click({'x': 955, 'y': 130})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'x': 955, 'y': 130})\n", - "2025-08-11 15:35:38,926 - agent.ComputerAgent - INFO - Computer: click({'x': 414, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 414, 'y': 75})\n", - "\u001b[92m15:35:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 228/7340 [9:18<290:09, 24.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:39,569 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m15:35:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:40,246 - agent.ComputerAgent - INFO - Computer: click({'x': 16, 'y': 478})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 16, 'y': 478})\n", - "\u001b[92m15:35:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 231/7340 [9:20<287:30, 24.7 steps/min]2025-08-11 15:35:41,893 - agent.ComputerAgent - INFO - Computer: click({'x': 183, 'y': 
53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 183, 'y': 53})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 3%|β–ˆ---------------------------------------| 232/7340 [9:21<286:44, 24.8 steps/min]\u001b[92m15:35:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:43,077 - agent.ComputerAgent - INFO - Computer: click({'x': 506, 'y': 190})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 506, 'y': 190})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 233/7340 [9:22<286:10, 24.8 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 15:35:44,347 - agent.ComputerAgent - INFO - LLM processing started with 11 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 11 messages\n", - "\u001b[92m15:35:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:35:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:45,032 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 547, 'scroll_x': 0, 'x': 125, 'y': 629})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 547, 'scroll_x': 0, 'x': 125, 'y': 629})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 15:35:46,313 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - " 3%|β–ˆ---------------------------------------| 234/7340 [9:25<286:13, 24.8 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d197f4f-b7b0-4196-9681-135d7bc3a45b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:47,613 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m15:35:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider 
= openai\n", - "\u001b[92m15:35:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0b1cfd32-0cbc-48e7-890d-9ec0ac043035/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e9d83ed4-d6d0-46f7-982b-98433769e30b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/49f1eefe-9bc4-430c-a6c8-83675960a057/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7ac3560-cea1-4b97-a59c-4b3038bec6c7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a6ee00b-4e8c-4a3f-bac1-9baec4d920a2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e1e61614-8290-4d90-9feb-594d2a7199e8/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 236/7340 [9:26<284:25, 25.0 steps/min]2025-08-11 15:35:48,312 - agent.ComputerAgent - INFO - Computer: click({'x': 877, 'y': 537})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 877, 'y': 537})\n", - "2025-08-11 15:35:49,338 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m15:35:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:50,012 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 28 messages\n", - "\u001b[92m15:35:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 236/7340 [9:29<285:36, 24.9 steps/min]2025-08-11 15:35:50,692 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:35:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:51,345 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:35:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:35:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 237/7340 [9:31<285:21, 24.9 steps/min]2025-08-11 15:35:52,714 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:35:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:53,753 - agent.ComputerAgent - INFO - LLM processing started with 13 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 13 messages\n", - "\u001b[92m15:35:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:54,426 - agent.ComputerAgent - INFO - LLM processing started with 13 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 13 messages\n", - "\u001b[92m15:35:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/982f8f16-b578-409f-8388-d8d5ee68ccee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77f486b6-dc2a-4a1d-bf54-fc05f9a8c3d7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 3%|β–ˆ---------------------------------------| 237/7340 [9:34<286:52, 24.8 steps/min]\u001b[92m15:35:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for 
open-end generation.\n", - "2025-08-11 15:35:55,767 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m15:35:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 15:35:56,466 - agent.ComputerAgent - INFO - Computer: click({'x': 501, 'y': 55})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 501, 'y': 55})\n", - "\u001b[92m15:35:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:35:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfefeec4-603f-4657-b0fe-7a641734693c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:35:58,194 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m15:35:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 3%|β–ˆ---------------------------------------| 238/7340 [9:37<287:10, 24.7 steps/min]2025-08-11 15:35:58,837 - agent.ComputerAgent - INFO - Computer: click({'x': 
347, 'y': 186})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 347, 'y': 186})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:35:59,467 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m15:35:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:35:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 239/7340 [9:38<286:34, 24.8 steps/min]2025-08-11 15:36:00,161 - agent.ComputerAgent - INFO - Computer: click({'x': 309, 'y': 116})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 309, 'y': 116})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:36:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 240/7340 [9:40<285:59, 24.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:36:01,470 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m15:36:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:36:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:36:02,165 - agent.ComputerAgent - INFO - LLM processing started with 15 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 15 messages\n", - "\u001b[92m15:36:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m15:36:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 3%|β–ˆ---------------------------------------| 241/7340 [9:41<285:27, 24.9 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:36:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:36:03,352 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 633, 'y': 320}, {'x': 422, 'y': 393}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 633, 'y': 320}, {'x': 422, 'y': 393}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions 
\"HTTP/1.1 400 Bad Request\"\n", - " 3%|β–ˆ---------------------------------------| 249/7340 [9:42<276:30, 25.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/835128b8-2a29-46f4-853f-4d70bb46a9d6/close \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 250/7340 [9:43<275:49, 25.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:36:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:36:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/edaeedb6-9993-4b6f-b226-19e2768a5736/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a74f1790-a107-43c9-8389-0a50a5192c5f/invoke \"HTTP/1.1 200 OK\"\n", - " 3%|β–ˆ---------------------------------------| 250/7340 [9:45<276:35, 25.6 steps/min]INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. 
You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 output.csv\n", - "- libreoffice --calc output.csv\n", - "\n", - "This converts both spreadsheets to CSV, concatenates their single columns into one file named output.csv, and opens it in LibreOffice Calc from the terminal.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: Done via Terminal.\n", - "\n", - "Commands executed:\n", - "- cd ~/Desktop\n", - "- libreoffice --headless --convert-to csv file1.xlsx\n", - "- libreoffice --headless --convert-to csv file2.ods\n", - "- cat file1.csv file2.csv > output.csv\n", - "- libreoffice --calc output.csv\n", - "\n", - "This converts both spreadsheets to CSV, concatenates their single columns into one file named output.csv, and opens it in LibreOffice Calc from the terminal.\n", - "\n", - "Task completed.\n", - "2025-08-11 15:39:44,642 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 679\n", - " - prompt_tokens: 8809\n", - " - total_tokens: 9488\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 576\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0178\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 679\n", - " - prompt_tokens: 8809\n", - " - total_tokens: 9488\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 576\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0178\n", - " 5%|β–ˆ---------------------------------------| 359/7340 [13:23<260:31, 26.8 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:39:45 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:39:45,831 - agent.ComputerAgent - INFO - Computer: click({'x': 111, 'y': 213})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 111, 'y': 213})\n", - " 5%|β–ˆ---------------------------------------| 359/7340 [13:25<260:54, 26.8 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:39:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:39:46,456 - agent.ComputerAgent - INFO - Computer: click({'x': 148, 'y': 739})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 148, 'y': 739})\n", - "\u001b[92m15:39:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:39:47,105 - agent.ComputerAgent - INFO - Computer: click({'x': 984, 'y': 68})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 984, 'y': 68})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:39:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:39:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 5%|β–ˆ---------------------------------------| 360/7340 [13:26<260:46, 26.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:39:48,477 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 607, 'scroll_x': 0, 'x': 91, 'y': 464})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 607, 'scroll_x': 0, 'x': 91, 'y': 464})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m15:39:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m15:39:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/invoke \"HTTP/1.1 200 OK\"\n", - " 5%|β–ˆ---------------------------------------| 362/7340 [13:28<259:43, 26.9 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 15:39:49,752 - agent.ComputerAgent - INFO - 
Computer: click({'x': 219, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 219, 'y': 53})\n", - "\u001b[92m15:39:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 15:39:50,434 - agent.ComputerAgent - INFO - Computer: click({'x': 416, 'y': 74})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 416, 'y': 74})\n", - " 5%|β–ˆβ–ˆ--------------------------------------| 374/7340 [13:30<251:38, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0a91cea7-3ffe-41c2-9405-1151904aee0c/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:39:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c8c54705-3689-4d05-b8e1-7a57903f3a21/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/63010886-f715-4208-aef0-b98c456e7e98/invoke \"HTTP/1.1 200 OK\"\n", - " 5%|β–ˆβ–ˆ--------------------------------------| 374/7340 [13:32<252:04, 27.6 steps/min]INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. 
You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Preferences > Input / Codecs > Record directory or filename and saved the setting.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I have changed VLC’s recordings folder to Desktop via Tools > Preferences > Input / Codecs > Record directory or filename and saved the setting.\n", - "\n", - "Task completed.\n", - "2025-08-11 15:53:48,419 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 232\n", - " - prompt_tokens: 8729\n", - " - total_tokens: 8961\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 192\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0132\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 232\n", - " - prompt_tokens: 8729\n", - " - total_tokens: 8961\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 192\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0132\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8a8f1594-3659-4132-9059-6fa366033df0/invoke \"HTTP/1.1 200 OK\"\n", - " 11%|β–ˆβ–ˆβ–ˆβ–ˆ------------------------------------| 777/7340 [27:30<232:18, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:53:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m15:53:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 11%|β–ˆβ–ˆβ–ˆβ–ˆ------------------------------------| 779/7340 [27:31<231:49, 28.3 steps/min]INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Open File (or drag-and-drop the files into VLC).\n", - "\n", - "If you want me to, I can play the show in the browser via Google TV, or play any DRM‑free local media in VLC.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I can’t complete this as requested. Playing a Google Play Movies & TV purchase directly in VLC would require bypassing Google’s DRM (Widevine), which I won’t help with. 
Additionally, Stranger Things is a Netflix original and is typically not sold on Google Play, so it may not even be available for purchase there.\n", - "\n", - "Legal ways to watch:\n", - "- In a browser: Go to play.google.com/tv (or tv.google.com), sign in to your Google account, search for the title in your Library, and play it in the browser.\n", - "- On devices: Use the Google TV app on Android/iOS/Android TV or YouTube app where your Google Play purchases appear, and watch or cast to a TV.\n", - "- If you have DRM‑free video files you own, you can play them in VLC via Media > Open File (or drag-and-drop the files into VLC).\n", - "\n", - "If you want me to, I can play the show in the browser via Google TV, or play any DRM‑free local media in VLC.\n", - "\n", - "Task completed.\n", - "2025-08-11 16:05:33,041 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 1371\n", - " - prompt_tokens: 8756\n", - " - total_tokens: 10127\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1152\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 2560\n", - " - response_cost: $0.0218\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 1371\n", - " - prompt_tokens: 8756\n", - " - total_tokens: 10127\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1152\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 2560\n", - " - response_cost: $0.0218\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1143/7340 [39:14<212:46, 29.1 steps/min]2025-08-11 16:05:33,721 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:05:33 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:05:34,369 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:05:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1144/7340 [39:16<212:41, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:05:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:05:36,085 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:05:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:05:37,424 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+='})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+='})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1144/7340 [39:19<213:01, 29.1 steps/min]\u001b[92m16:05:38 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:05:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:05:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:05:39,411 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:05:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:05:40,051 - agent.ComputerAgent - INFO - Computer: click({'x': 247, 'y': 103})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 247, 'y': 103})\n", - "\u001b[92m16:05:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1144/7340 [39:21<213:11, 29.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:05:40,712 - agent.ComputerAgent - INFO - LLM processing started with 22 
messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:05:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:05:41,401 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:05:41,402 - agent.ComputerAgent - INFO - Computer: move({'x': 512, 'y': 32})\n", - "INFO:agent.ComputerAgent:Computer: move({'x': 512, 'y': 32})\n", - "\u001b[92m16:05:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:05:42,101 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:05:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1145/7340 [39:23<213:09, 29.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:05:42,775 - agent.ComputerAgent - INFO - Computer: click({'x': 87, 'y': 165})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 87, 'y': 165})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/932fb6ee-8e77-41ca-8220-27e0c8783ced/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:05:43,825 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:05:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9618024b-01b2-4c48-8a72-2ec16bffcf41/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1146/7340 [39:26<213:09, 29.1 steps/min]\u001b[92m16:05:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:05:45,138 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:05:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:05:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:05:45,820 - agent.ComputerAgent - INFO - Computer: click({'x': 709, 'y': 305})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 709, 'y': 305})\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1147/7340 [39:27<213:03, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9618024b-01b2-4c48-8a72-2ec16bffcf41/invoke \"HTTP/1.1 200 OK\"\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1157/7340 [39:28<210:57, 29.3 
steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9618024b-01b2-4c48-8a72-2ec16bffcf41/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8bb6b36b-e7fb-4e80-916a-501fa7ad17f9/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:05:48,172 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:05:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:05:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3f7029e-7bbd-43fb-bea4-c66cc9ae685d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1157/7340 [39:30<211:08, 29.3 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:05:49,481 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:05:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7955abad-b178-4311-85d5-7f1dedbecbcc/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 
16:05:50,142 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:05:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ffbf23fa-9bd6-4b26-befa-cb45d31fc4fa/invoke \"HTTP/1.1 200 OK\"\n", - " 16%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------------| 1157/7340 [39:31<211:15, 29.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00/dev/null || true'\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"bash -lc 'cd ~/Desktop && ls -1 *.png 2>/dev/null || true'\"})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:10:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1308/7340 [44:13<203:56, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:10:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:10:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/d8b3a739-de56-40fe-896f-831373c8ecee/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:32,953 - agent.ComputerAgent - INFO - Computer: click({'x': 512, 'y': 384})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 512, 'y': 384})\n", - "\u001b[92m16:10:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1310/7340 [44:15<203:42, 29.6 steps/min]\u001b[92m16:10:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:34,242 - agent.ComputerAgent - INFO - Computer: double_click({'x': 984, 'y': 658})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 984, 'y': 658})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:10:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:10:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:10:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:36,222 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 284})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 284})\n", - "\u001b[92m16:10:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1311/7340 [44:17<203:43, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:10:36,912 - agent.ComputerAgent - INFO - Computer: click({'x': 205, 'y': 735})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 205, 'y': 735})\n", - "\u001b[92m16:10:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:38,184 - agent.ComputerAgent 
- INFO - Computer: type({'text': 'sudo find . -type f -exec chmod 644 {} +'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'sudo find . -type f -exec chmod 644 {} +'})\n", - "2025-08-11 16:10:38,849 - agent.ComputerAgent - INFO - Computer: click({'x': 100, 'y': 390})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 100, 'y': 390})\n", - "\u001b[92m16:10:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1313/7340 [44:21<203:36, 29.6 steps/min]\u001b[92m16:10:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:40,187 - agent.ComputerAgent - INFO - Computer: click({'x': 359, 'y': 258})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 359, 'y': 258})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:10:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:10:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1316/7340 
[44:22<203:08, 29.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:10:41,570 - agent.ComputerAgent - INFO - Computer: click({'x': 131, 'y': 91, 'button': 'left'})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 131, 'y': 91, 'button': 'left'})\n", - "2025-08-11 16:10:42,221 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:10:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:10:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1317/7340 [44:24<203:03, 29.7 steps/min]2025-08-11 16:10:42,902 - agent.ComputerAgent - INFO - Computer: click({'x': 910, 'y': 233})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 910, 'y': 233})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1318/7340 [44:25<202:56, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:10:45,238 - agent.ComputerAgent - INFO - Computer: type({'text': 'https://upload.wikimedia.org/wikipedia/en/thumb/1/1e/The_University_of_Hong_Kong_crest.svg/1200px-The_University_of_Hong_Kong_crest.svg.png'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'https://upload.wikimedia.org/wikipedia/en/thumb/1/1e/The_University_of_Hong_Kong_crest.svg/1200px-The_University_of_Hong_Kong_crest.svg.png'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a23ddde7-5509-407d-af64-ea09807c1af1/invoke \"HTTP/1.1 200 
OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa4f593f-4977-4dc4-9238-0a67602a0900/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fafe8f9a-bc46-42ad-b3ca-7190a64ab552/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1319/7340 [44:26<202:54, 29.7 steps/min]2025-08-11 16:10:45,918 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:10:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ca85c226-0c49-4084-b2bc-86bd540c8bce/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/79295f2f-2987-488c-b4b7-c968f71c7597/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b2656d0e-a6f4-4ecb-a099-cfe8471c4998/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:10:46,633 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:10:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:28<202:49, 29.7 steps/min]2025-08-11 16:10:47,821 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:10:47 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3b3e7fbd-8c02-45a6-bb3d-83c056398d3f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80299c20-3bcf-48b1-a471-299a1eda0a00/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:29<202:55, 29.7 steps/min]2025-08-11 16:10:48,618 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:10:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89cdf329-a61d-4d69-9c6c-5d0ea35677b6/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:10:49,365 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:10:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3f7029e-7bbd-43fb-bea4-c66cc9ae685d/invoke \"HTTP/1.1 200 OK\"\n", - " 
18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:31<203:01, 29.7 steps/min]2025-08-11 16:10:50,151 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:10:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:10:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:10:51,510 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:10:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:33<203:11, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ffbf23fa-9bd6-4b26-befa-cb45d31fc4fa/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:10:52,195 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:10:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:10:52 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/932fb6ee-8e77-41ca-8220-27e0c8783ced/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:10:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:10:52,855 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:10:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:10:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:34<203:17, 29.6 steps/min]2025-08-11 16:10:53,549 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:10:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:10:54,253 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 420, 'y': 162}, {'x': 170, 'y': 133}]})\n", - "INFO:agent.ComputerAgent:Computer: 
drag({'path': [{'x': 420, 'y': 162}, {'x': 170, 'y': 133}]})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1320/7340 [44:36<203:24, 29.6 steps/min]2025-08-11 16:10:55,291 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:10:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1321/7340 [44:37<203:17, 29.6 steps/min]2025-08-11 16:10:55,940 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:10:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:10:56,604 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:10:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1321/7340 [44:38<203:23, 29.6 steps/min]2025-08-11 16:10:57,291 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:10:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1321/7340 [44:40<203:32, 29.6 
steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:10:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:10:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:00,151 - agent.ComputerAgent - INFO - Computer: click({'x': 122, 'y': 219})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 122, 'y': 219})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1321/7340 [44:42<203:43, 29.5 steps/min]\u001b[92m16:11:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:11:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:02,396 - agent.ComputerAgent - INFO - Computer: click({'x': 16, 'y': 429})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 16, 'y': 429})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e8a299f4-d946-4970-b9a4-2503717de8ce/invoke \"HTTP/1.1 200 OK\"\n", - 
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:03,765 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1322/7340 [44:45<203:44, 29.5 steps/min]2025-08-11 16:11:04,441 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:11:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:05,798 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cc2e38be-6768-4928-bfe5-d7f31cb68b24/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1324/7340 [44:48<203:37, 29.5 steps/min]\u001b[92m16:11:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:07,755 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:11:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:09,113 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "2025-08-11 16:11:09,745 - agent.ComputerAgent - INFO - Computer: click({'x': 434, 'y': 418})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 434, 'y': 418})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1325/7340 
[44:52<203:44, 29.5 steps/min]\u001b[92m16:11:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:12,504 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "2025-08-11 16:11:13,143 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:11:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:11:13,809 - agent.ComputerAgent - INFO - Computer: click({'x': 248, 'y': 291})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 248, 'y': 291})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ca85c226-0c49-4084-b2bc-86bd540c8bce/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:11:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fafe8f9a-bc46-42ad-b3ca-7190a64ab552/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/39724bde-60dd-471d-ba25-1ac9b1405c76/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1326/7340 [44:56<203:48, 29.5 steps/min]\u001b[92m16:11:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:15,137 - agent.ComputerAgent - INFO - Computer: click({'x': 293, 'y': 185})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 293, 'y': 185})\n", - "\u001b[92m16:11:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:15,821 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:11:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:11:16,500 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:11:16,501 - agent.ComputerAgent - INFO - Computer: click({'x': 650, 'y': 362})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 650, 'y': 362})\n", - "\u001b[92m16:11:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1328/7340 [44:58<203:35, 29.5 steps/min]2025-08-11 16:11:17,188 - agent.ComputerAgent - INFO - Computer: double_click({'x': 247, 'y': 153})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 247, 'y': 153})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1330/7340 [44:59<203:19, 29.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:18,491 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:11:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:19,525 - agent.ComputerAgent - INFO - Computer: click({'x': 867, 'y': 233})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 867, 'y': 233})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1331/7340 [45:01<203:18, 29.6 steps/min]\u001b[92m16:11:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:21,243 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:11:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1332/7340 [45:02<203:11, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:21,948 - agent.ComputerAgent - INFO - Computer: click({'x': 573, 'y': 249})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 573, 'y': 249})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1332/7340 [45:04<203:18, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/932fb6ee-8e77-41ca-8220-27e0c8783ced/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:23 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:23,815 - agent.ComputerAgent - INFO - Computer: click({'x': 254, 'y': 736})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 254, 'y': 736})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89cdf329-a61d-4d69-9c6c-5d0ea35677b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3b3e7fbd-8c02-45a6-bb3d-83c056398d3f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80299c20-3bcf-48b1-a471-299a1eda0a00/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b2656d0e-a6f4-4ecb-a099-cfe8471c4998/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1333/7340 [45:05<203:12, 29.6 steps/min]2025-08-11 16:11:24,492 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:11:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:11:25,193 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:11:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1334/7340 [45:06<203:07, 29.6 
steps/min]2025-08-11 16:11:26,204 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:11:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1334/7340 [45:08<203:12, 29.6 steps/min]2025-08-11 16:11:27,268 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:11:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3f7029e-7bbd-43fb-bea4-c66cc9ae685d/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1334/7340 [45:09<203:16, 29.5 steps/min]2025-08-11 16:11:27,895 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:11:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:29,210 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a23ddde7-5509-407d-af64-ea09807c1af1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1334/7340 [45:11<203:29, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:30,853 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:11:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:31,535 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_x': 0, 'scroll_y': 685, 'x': 633, 'y': 405})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_x': 0, 'scroll_y': 685, 'x': 633, 'y': 405})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1334/7340 [45:13<203:35, 29.5 steps/min]2025-08-11 16:11:32,197 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:11:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:32 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1335/7340 [45:14<203:31, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:33,951 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:11:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:35,279 - agent.ComputerAgent - INFO - Computer: type({'text': \"bash -lc 'cd ~/Desktop && ls -1 *.png'\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"bash -lc 'cd ~/Desktop && ls -1 *.png'\"})\n", - "2025-08-11 16:11:35,955 - agent.ComputerAgent - INFO - Computer: double_click({'x': 49, 'y': 431})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 49, 'y': 431})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ffbf23fa-9bd6-4b26-befa-cb45d31fc4fa/invoke \"HTTP/1.1 200 OK\"\n", - " 
18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1335/7340 [45:18<203:47, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:37,317 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:11:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:37,999 - agent.ComputerAgent - INFO - Computer: click({'x': 122, 'y': 176})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 122, 'y': 176})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1337/7340 [45:19<203:31, 29.5 steps/min]2025-08-11 16:11:38,642 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:11:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 
1338/7340 [45:21<203:26, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:11:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:40,487 - agent.ComputerAgent - INFO - Computer: click({'x': 397, 'y': 624})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 397, 'y': 624})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1339/7340 [45:23<203:24, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/79295f2f-2987-488c-b4b7-c968f71c7597/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:42,633 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:11:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ca85c226-0c49-4084-b2bc-86bd540c8bce/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:11:43 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1339/7340 [45:25<203:35, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:11:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:45,987 - agent.ComputerAgent - INFO - Computer: type({'text': 'find . -type f -perm -not -0644 -ls | head -n 20'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'find . 
-type f -perm -not -0644 -ls | head -n 20'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cc2e38be-6768-4928-bfe5-d7f31cb68b24/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:46,670 - agent.ComputerAgent - INFO - Computer: click({'x': 249, 'y': 339})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 249, 'y': 339})\n", - "\u001b[92m16:11:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1339/7340 [45:28<203:47, 29.4 steps/min]\u001b[92m16:11:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:47,338 - agent.ComputerAgent - INFO - Computer: click({'x': 982, 'y': 741})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 982, 'y': 741})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:48,711 - agent.ComputerAgent - INFO - Computer: click({'x': 1000, 'y': 739})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1000, 'y': 739})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1341/7340 
[45:30<203:34, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:49,375 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:11:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:50,053 - agent.ComputerAgent - INFO - Computer: click({'x': 553, 'y': 275})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 553, 'y': 275})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1343/7340 [45:31<203:18, 29.5 steps/min]2025-08-11 16:11:50,708 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:11:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1344/7340 [45:33<203:14, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:52,081 - agent.ComputerAgent - INFO - LLM 
processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:11:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:52,743 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:11:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:11:54,178 - agent.ComputerAgent - INFO - Computer: click({'x': 49, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 49, 'y': 53})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1344/7340 [45:35<203:25, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:55,482 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+shift+v'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+shift+v'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80299c20-3bcf-48b1-a471-299a1eda0a00/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3b3e7fbd-8c02-45a6-bb3d-83c056398d3f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:11:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/932fb6ee-8e77-41ca-8220-27e0c8783ced/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1345/7340 [45:37<203:23, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:11:56,832 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:11:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:11:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:11:57,492 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:11:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:11:58,155 - agent.ComputerAgent - INFO - Computer: click({'x': 496, 'y': 256})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 496, 'y': 256})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1345/7340 [45:39<203:32, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:11:59,512 - agent.ComputerAgent - INFO - Computer: click({'x': 
188, 'y': 54})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 188, 'y': 54})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89cdf329-a61d-4d69-9c6c-5d0ea35677b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:00,870 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1346/7340 [45:42<203:33, 29.4 steps/min]2025-08-11 16:12:01,532 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:12:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:02,223 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:12:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e8a299f4-d946-4970-b9a4-2503717de8ce/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1347/7340 [45:44<203:28, 29.5 steps/min]2025-08-11 16:12:02,913 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:12:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - "2025-08-11 16:12:03,573 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:12:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1347/7340 [45:45<203:34, 29.4 steps/min]2025-08-11 16:12:04,252 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:12:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1347/7340 [45:46<203:38, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1347/7340 [45:47<203:43, 29.4 steps/min]\u001b[92m16:12:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:06,650 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_x': 0, 'scroll_y': 677, 'x': 633, 'y': 362})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_x': 0, 'scroll_y': 677, 
'x': 633, 'y': 362})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1347/7340 [45:48<203:47, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b2656d0e-a6f4-4ecb-a099-cfe8471c4998/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1348/7340 [45:49<203:41, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa4f593f-4977-4dc4-9238-0a67602a0900/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3f7029e-7bbd-43fb-bea4-c66cc9ae685d/invoke \"HTTP/1.1 200 OK\"\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:09,759 - agent.ComputerAgent - INFO - Computer: click({'x': 296, 'y': 736})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 296, 'y': 736})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1348/7340 [45:51<203:50, 29.4 steps/min]\u001b[92m16:12:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:10,408 - agent.ComputerAgent - INFO - Computer: click({'x': 234, 'y': 97})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 234, 'y': 97})\n", - "\u001b[92m16:12:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:11,099 - agent.ComputerAgent - INFO - Computer: click({'x': 332, 'y': 162})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 332, 'y': 162})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1349/7340 [45:52<203:45, 29.4 steps/min]2025-08-11 16:12:11,772 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:12:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:12,423 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM 
processing started with 28 messages\n", - "\u001b[92m16:12:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1351/7340 [45:54<203:29, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:13,589 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:12:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1351/7340 [45:56<203:37, 29.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:12:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1351/7340 [45:58<203:46, 29.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:16,944 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:12:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:17,601 - agent.ComputerAgent - INFO - Computer: click({'x': 684, 'y': 41})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 684, 'y': 41})\n", - "\u001b[92m16:12:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"\u001b[92m16:12:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1351/7340 [46:00<203:55, 29.4 steps/min]\u001b[92m16:12:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:18,978 - agent.ComputerAgent - INFO - Computer: click({'x': 122, 'y': 213})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 122, 'y': 213})\n", - "2025-08-11 16:12:19,629 - agent.ComputerAgent - INFO - Computer: click({'x': 244, 'y': 149})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 244, 'y': 149})\n", - "2025-08-11 16:12:20,317 - agent.ComputerAgent - INFO - Computer: click({'x': 397, 'y': 564})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 397, 'y': 564})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80299c20-3bcf-48b1-a471-299a1eda0a00/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fafe8f9a-bc46-42ad-b3ca-7190a64ab552/invoke \"HTTP/1.1 200 OK\"\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1352/7340 [46:02<203:53, 29.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:20,972 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:12:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ffbf23fa-9bd6-4b26-befa-cb45d31fc4fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:21,645 - agent.ComputerAgent - INFO - Computer: click({'x': 249, 'y': 339})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 249, 'y': 339})\n", - "2025-08-11 16:12:22,272 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:12:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1355/7340 [46:04<203:28, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1356/7340 [46:05<203:22, 29.4 steps/min]2025-08-11 16:12:24,123 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:12:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:24,852 - agent.ComputerAgent - INFO - Computer: click({'x': 623, 'y': 359})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 623, 'y': 359})\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1356/7340 [46:06<203:28, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:26,220 - agent.ComputerAgent - INFO - Computer: type({'text': 'source=~/Desktop/dir1; target=~/Desktop/dir3; if [ -d \"$source\" ] && [ -d \"$target\" ]; then rsync -a -f\"+ */\" -f\"- *\" \"$source\" \"$target\"; echo \"Copied directory hierarchy.\"; else echo \"Source or target directory not found\"; fi'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'source=~/Desktop/dir1; target=~/Desktop/dir3; if [ -d \"$source\" ] && [ -d \"$target\" ]; then rsync -a -f\"+ */\" -f\"- *\" \"$source\" \"$target\"; echo \"Copied directory hierarchy.\"; else echo \"Source or target directory not found\"; fi'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 18%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1357/7340 [46:08<203:27, 29.4 
steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:12:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:28,062 - agent.ComputerAgent - INFO - Computer: click({'x': 72, 'y': 90})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 72, 'y': 90})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:30,106 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/932fb6ee-8e77-41ca-8220-27e0c8783ced/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cc2e38be-6768-4928-bfe5-d7f31cb68b24/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1358/7340 [46:11<203:29, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89cdf329-a61d-4d69-9c6c-5d0ea35677b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:31,450 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 676, 'scroll_x': 0})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 676, 'scroll_x': 0})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ca85c226-0c49-4084-b2bc-86bd540c8bce/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:12:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1361/7340 [46:13<203:05, 29.4 steps/min]2025-08-11 16:12:32,793 - agent.ComputerAgent - INFO - Computer: click({'x': 553, 'y': 280})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 553, 'y': 280})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:12:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:34,134 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:12:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a23ddde7-5509-407d-af64-ea09807c1af1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/39724bde-60dd-471d-ba25-1ac9b1405c76/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1361/7340 [46:15<203:14, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:35,495 - agent.ComputerAgent - INFO - Computer: type({'text': \"bash -lc 'cd ~/Desktop && ls -1'}\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"bash -lc 'cd ~/Desktop && ls -1'}\"})\n", - "2025-08-11 16:12:36,154 - agent.ComputerAgent - INFO - Computer: click({'x': 263, 'y': 318})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 263, 'y': 318})\n", - "2025-08-11 16:12:36,816 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:12:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1362/7340 [46:18<203:15, 29.4 
steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:37,525 - agent.ComputerAgent - INFO - Computer: click({'x': 426, 'y': 257})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 426, 'y': 257})\n", - "2025-08-11 16:12:38,193 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:12:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1364/7340 [46:20<203:02, 29.4 steps/min]\u001b[92m16:12:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 16:12:39,517 - agent.ComputerAgent - INFO - LLM processing started with 31 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 31 messages\n", - "\u001b[92m16:12:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:40,192 - agent.ComputerAgent - INFO - Computer: click({'x': 526, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 526, 'y': 232})\n", - "2025-08-11 16:12:40,843 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:12:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1365/7340 [46:23<203:03, 29.4 steps/min]\u001b[92m16:12:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:42,204 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:12:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:12:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 
200 OK\"\n", - "2025-08-11 16:12:43,551 - agent.ComputerAgent - INFO - Computer: click({'x': 835, 'y': 36})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 835, 'y': 36})\n", - "2025-08-11 16:12:44,241 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:12:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:44,933 - agent.ComputerAgent - INFO - Computer: click({'x': 433, 'y': 635})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 433, 'y': 635})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b2656d0e-a6f4-4ecb-a099-cfe8471c4998/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3b3e7fbd-8c02-45a6-bb3d-83c056398d3f/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1367/7340 [46:26<202:56, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e8a299f4-d946-4970-b9a4-2503717de8ce/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/79295f2f-2987-488c-b4b7-c968f71c7597/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:45,580 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:12:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:46,215 - 
agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:12:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1369/7340 [46:28<202:40, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:46,933 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:12:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:47,614 - agent.ComputerAgent - INFO - LLM processing started with 33 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 33 messages\n", - "\u001b[92m16:12:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80299c20-3bcf-48b1-a471-299a1eda0a00/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1369/7340 [46:29<202:46, 29.4 steps/min]2025-08-11 16:12:48,293 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:12:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "2025-08-11 16:12:48,973 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:12:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1369/7340 [46:30<202:52, 29.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:12:50,174 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:12:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fafe8f9a-bc46-42ad-b3ca-7190a64ab552/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1369/7340 [46:31<202:57, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3f7029e-7bbd-43fb-bea4-c66cc9ae685d/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:51,233 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:12:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89cdf329-a61d-4d69-9c6c-5d0ea35677b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:52,561 - agent.ComputerAgent - 
INFO - Computer: type({'text': 'file1'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'file1'})\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1369/7340 [46:34<203:07, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:12:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa4f593f-4977-4dc4-9238-0a67602a0900/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:12:53,913 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:12:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:12:55,675 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+,'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+,'})\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1371/7340 [46:37<202:59, 29.4 steps/min]\u001b[92m16:12:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:12:56,375 - agent.ComputerAgent - INFO - Computer: 
click({'x': 304, 'y': 735})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 304, 'y': 735})\n", - "2025-08-11 16:12:57,004 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:12:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1371/7340 [46:38<203:05, 29.4 steps/min]2025-08-11 16:12:57,682 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:12:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1372/7340 [46:39<202:58, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:13:00,048 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+v'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+v'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1372/7340 [46:41<203:07, 29.4 steps/min]2025-08-11 16:13:00,705 - agent.ComputerAgent - INFO - LLM processing started with 35 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 35 messages\n", - "\u001b[92m16:13:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 
16:13:01,364 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:13:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1372/7340 [46:45<203:22, 29.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ca85c226-0c49-4084-b2bc-86bd540c8bce/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa4f593f-4977-4dc4-9238-0a67602a0900/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:13:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1374/7340 [46:46<203:05, 29.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:13:05,255 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:13:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:13:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/fa4f593f-4977-4dc4-9238-0a67602a0900/close \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:13:06,551 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "2025-08-11 16:13:07,203 - agent.ComputerAgent - INFO - Computer: click({'x': 232, 'y': 97})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 232, 'y': 97})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ffbf23fa-9bd6-4b26-befa-cb45d31fc4fa/invoke \"HTTP/1.1 200 OK\"\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1376/7340 [46:49<202:59, 29.4 steps/min]2025-08-11 16:13:08,546 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:13:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:13:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0cad7a26-2224-4401-9a66-57daca76d380/invoke \"HTTP/1.1 200 OK\"\n", - " 
19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1376/7340 [46:51<203:04, 29.4 steps/min]2025-08-11 16:13:09,882 - agent.ComputerAgent - INFO - LLM processing started with 37 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 37 messages\n", - "\u001b[92m16:13:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 19%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------------| 1376/7340 [46:52<203:08, 29.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:13:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. 
You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': ''})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4e9c5c3-fa17-4f05-8383-03a3cb3c1fba/invoke \"HTTP/1.1 200 OK\"\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1664/7340 [58:10<198:24, 28.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4813e5e3-be12-40e2-9cc0-d5be0ad320cf/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:24:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:24:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c062c21a-1b89-4117-86d3-d763f8af4cbd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a0a74ba-160b-41ee-a6d2-6dc61c143d94/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:24:29 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:24:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1684/7340 [58:12<195:28, 28.9 steps/min]2025-08-11 16:24:31,005 - agent.ComputerAgent - INFO - Computer: click({'x': 369, 'y': 564})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 369, 'y': 564})\n", - "\u001b[92m16:24:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:24:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:24:31,655 - agent.ComputerAgent - INFO - Computer: click({'x': 842, 'y': 571})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 842, 'y': 571})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:24:32,328 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:24:32 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:24:32,999 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:24:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:24:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:24:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1684/7340 [58:14<195:37, 28.9 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:24:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:24:34,996 - agent.ComputerAgent - INFO - Computer: type({'text': '3'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '3'})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:24:35,673 - agent.ComputerAgent - INFO - Computer: click({'button': 'right', 'x': 987, 'y': 658})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'button': 'right', 'x': 987, 'y': 658})\n", - "2025-08-11 16:24:36,338 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:24:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1686/7340 [58:18<195:30, 28.9 steps/min]\u001b[92m16:24:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:24:37,031 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 280, 'y': 375}, {'x': 802, 'y': 446}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 280, 'y': 375}, {'x': 802, 'y': 446}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:24:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:24:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1688/7340 [58:19<195:17, 28.9 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:24:38,396 - agent.ComputerAgent - INFO - 
Computer: click({'x': 60, 'y': 35})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 60, 'y': 35})\n", - "\u001b[92m16:24:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e8a299f4-d946-4970-b9a4-2503717de8ce/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7bc07116-76e3-42fb-a0e3-a2273a5caa64/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:24:39,048 - agent.ComputerAgent - INFO - Computer: click({'x': 478, 'y': 256})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 478, 'y': 256})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4e9c5c3-fa17-4f05-8383-03a3cb3c1fba/close \"HTTP/1.1 200 OK\"\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1693/7340 [58:22<194:41, 29.0 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3ad517be-7b27-424d-b632-3ba6ff1a1e71/invoke \"HTTP/1.1 200 OK\"\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1695/7340 [58:23<194:26, 29.0 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/d8b3a739-de56-40fe-896f-831373c8ecee/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3980166d-0a7d-4a58-a915-07dbe8b607bb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b6b06a1a-197c-499e-a884-cc6bce509fa3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 23%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------------| 1695/7340 [58:25<194:33, 29.0 steps/min]\u001b[92m16:24:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9dca7e41-642b-4cca-8758-834cef0e844c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5da0c259-034b-4ba2-9e95-9d4ae99c7475/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. 
You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1938/7340 [66:27<185:15, 29.2 steps/min]\u001b[92m16:32:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8956c64b-871b-43e2-84de-047c8ce2a839/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.48s/it]\n", - "2025-08-11 16:32:46,573 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:32:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1939/7340 [66:29<185:13, 29.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7117b51-399c-45d8-88a1-c54a00b2bc38/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:32:48,835 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:32:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1939/7340 [66:32<185:19, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:32:51 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.38s/it]29.1 steps/min]\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:32:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:32:53,100 - agent.ComputerAgent - INFO - Computer: type({'text': '\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08LARS Resources (Backup)'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08LARS Resources (Backup)'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - 
"Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:32:54,407 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+l'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+l'})\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1939/7340 [66:36<185:31, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:32:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:32:55,743 - agent.ComputerAgent - INFO - Computer: click({'x': 1009, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1009, 'y': 101})\n", - "2025-08-11 16:32:56,380 - agent.ComputerAgent - INFO - Computer: click({'x': 46, 'y': 527})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 46, 'y': 527})\n", - "2025-08-11 16:32:57,051 - agent.ComputerAgent - INFO - Computer: click({'x': 72, 'y': 244})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 72, 'y': 244})\n", - 
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:32:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:32:58,377 - agent.ComputerAgent - INFO - Computer: screenshot({})\n", - "INFO:agent.ComputerAgent:Computer: screenshot({})\n", - "2025-08-11 16:32:59,058 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:32:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:32:59,707 - agent.ComputerAgent - INFO - Computer: click({'x': 693, 'y': 698})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 693, 'y': 698})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:32:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:01 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1940/7340 [66:43<185:43, 29.1 steps/min]\u001b[92m16:33:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:02,296 - agent.ComputerAgent - INFO - Computer: click({'x': 70, 'y': 77})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 70, 'y': 77})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 26%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1945/7340 [66:44<185:07, 29.1 steps/min]\u001b[92m16:33:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:03,527 - agent.ComputerAgent - INFO - Computer: click({'x': 635, 'y': 468})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 635, 'y': 468})\n", - "\u001b[92m16:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:04,208 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -628, 'scroll_x': 0, 'x': 526, 'y': 463})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -628, 'scroll_x': 0, 'x': 526, 'y': 
463})\n", - "\u001b[92m16:33:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4adb2bbf-d6e6-4d15-9e9a-c199cf02d5d6/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1946/7340 [66:45<185:03, 29.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 16:33:04,854 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:33:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:05,505 - agent.ComputerAgent - INFO - Computer: click({'x': 969, 'y': 169})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 969, 'y': 169})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1948/7340 [66:47<184:53, 29.2 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:33:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:07,437 - agent.ComputerAgent - INFO - Computer: click({'x': 87, 'y': 181})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 87, 'y': 181})\n", - "\u001b[92m16:33:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3980166d-0a7d-4a58-a915-07dbe8b607bb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f83df7e3-6ab0-404e-9745-09768e42b6fb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6d8a38cc-c8f6-484c-9a6d-e6c404b2c7f9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4813e5e3-be12-40e2-9cc0-d5be0ad320cf/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1949/7340 [66:49<184:49, 29.2 steps/min]2025-08-11 16:33:08,125 - agent.ComputerAgent - INFO - Computer: click({'x': 76, 'y': 321})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 76, 'y': 321})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a3ea8855-19d9-4e10-8208-fd9e060997e3/invoke \"HTTP/1.1 200 OK\"\n", 
- "2025-08-11 16:33:08,772 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:33:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:09,435 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:33:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:10,071 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:33:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1950/7340 [66:51<184:49, 29.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:33:11,418 - agent.ComputerAgent - INFO - Computer: type({'text': 'orig=$(find . -path ./fails -prune -o -type f -name \"*failed.ipynb\" -print | wc -l); copied=$(find ./fails -type f -name \"*failed.ipynb\" -print | wc -l); echo \"orig=$orig copied=$copied\"'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'orig=$(find . 
-path ./fails -prune -o -type f -name \"*failed.ipynb\" -print | wc -l); copied=$(find ./fails -type f -name \"*failed.ipynb\" -print | wc -l); echo \"orig=$orig copied=$copied\"'})\n", - "2025-08-11 16:33:12,446 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:33:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ea9e43cc-3d54-4c89-bb53-a189a3ae9a25/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d0000302-258b-4660-9baa-e149c2ad83fd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b2ca79e3-4425-4cd4-a9dd-42e2431eb008/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1951/7340 [66:54<184:48, 29.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:33:14,195 - agent.ComputerAgent - INFO - Computer: type({'text': 'sudo apt-get update -y && sudo apt-get install -y steghide binwalk exiftool ffmpeg\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'sudo apt-get update -y && sudo apt-get install -y steghide binwalk exiftool ffmpeg\\n'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7117b51-399c-45d8-88a1-c54a00b2bc38/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1952/7340 [66:55<184:44, 29.2 steps/min]2025-08-11 16:33:14,816 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 
messages\n", - "\u001b[92m16:33:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:15,505 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:33:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:16,172 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/6b741091-faa0-4d97-9592-0dc410b6cc53/reset \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1953/7340 [66:57<184:42, 29.2 steps/min]2025-08-11 16:33:16,865 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1953/7340 [66:58<184:45, 29.2 steps/min]2025-08-11 16:33:17,496 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:33:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1953/7340 [67:01<184:53, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6b741091-faa0-4d97-9592-0dc410b6cc53/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81398d20-3c85-489b-9abc-2af244ec1feb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a0a74ba-160b-41ee-a6d2-6dc61c143d94/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1953/7340 [67:03<184:58, 29.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:22,384 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:33:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:33:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8956c64b-871b-43e2-84de-047c8ce2a839/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:33:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:23,684 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 95})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 95})\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1953/7340 [67:05<185:03, 29.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:24,329 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:33:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/488d7653-4f2d-4576-85c7-d87dc7a875ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:33:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "\u001b[92m16:33:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:33:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:27,033 - agent.ComputerAgent - INFO - Computer: click({'x': 592, 'y': 568})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 592, 'y': 568})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:33:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1954/7340 [67:09<185:06, 29.1 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:33:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:28,365 - agent.ComputerAgent - INFO - Computer: click({'x': 664, 'y': 213})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 664, 'y': 213})\n", - "\u001b[92m16:33:28 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:29,040 - agent.ComputerAgent - INFO - Computer: click({'x': 489, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 489, 'y': 427})\n", - "\u001b[92m16:33:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:33:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1955/7340 [67:10<185:02, 29.1 steps/min]2025-08-11 16:33:29,694 - agent.ComputerAgent - INFO - Computer: click({'x': 83, 'y': 139})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 83, 'y': 139})\n", - "2025-08-11 16:33:30,372 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -658, 'scroll_x': 0, 'x': 526, 'y': 432})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -658, 'scroll_x': 0, 'x': 526, 'y': 432})\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1957/7340 [67:12<184:50, 29.1 steps/min]2025-08-11 16:33:31,077 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:33:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:31,780 - 
agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:33:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:33:33,106 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1959/7340 [67:14<184:42, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:34,429 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:33:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1959/7340 [67:16<184:46, 29.1 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:35,086 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:33:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:33:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6d8a38cc-c8f6-484c-9a6d-e6c404b2c7f9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f83df7e3-6ab0-404e-9745-09768e42b6fb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4adb2bbf-d6e6-4d15-9e9a-c199cf02d5d6/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:33:35,779 - agent.ComputerAgent - INFO - Computer: click({'x': 86, 'y': 73})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 86, 'y': 73})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4813e5e3-be12-40e2-9cc0-d5be0ad320cf/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/485267e4-f348-45f0-a08d-1d1f28a01f1d/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1959/7340 [67:17<184:50, 29.1 steps/min]2025-08-11 16:33:36,470 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:33:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:33:37,105 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:33:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1960/7340 [67:19<184:48, 29.1 steps/min]\u001b[92m16:33:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:38,439 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:33:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:33:39,107 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:33:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1960/7340 [67:20<184:51, 29.1 steps/min]\u001b[92m16:33:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:33:39,749 - agent.ComputerAgent - INFO - Computer: click({'x': 715, 'y': 627})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 715, 'y': 627})\n", - " 
27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1960/7340 [67:21<184:54, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/485267e4-f348-45f0-a08d-1d1f28a01f1d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e7117b51-399c-45d8-88a1-c54a00b2bc38/invoke \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1962/7340 [67:22<184:41, 29.1 steps/min]2025-08-11 16:33:41,945 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:33:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/485267e4-f348-45f0-a08d-1d1f28a01f1d/close \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1962/7340 [67:24<184:45, 29.1 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 27%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------------| 1962/7340 [67:25<184:48, 29.1 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:33:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ea9e43cc-3d54-4c89-bb53-a189a3ae9a25/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 
200 OK\"\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\\n2
\\n3
'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '1
\\n2
\\n3
'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2c254802-788e-4b4b-98dc-68cd2c6bcce4/invoke \"HTTP/1.1 200 OK\"\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2167/7340 [73:25<175:17, 29.5 steps/min]2025-08-11 16:39:45,389 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:39:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2168/7340 [73:27<175:13, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8956c64b-871b-43e2-84de-047c8ce2a839/invoke \"HTTP/1.1 200 OK\"\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2168/7340 [73:28<175:16, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:39:48,220 - agent.ComputerAgent - INFO - Computer: type({'text': 'Manchester, GB'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Manchester, GB'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8956c64b-871b-43e2-84de-047c8ce2a839/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2168/7340 [73:30<175:22, 29.5 steps/min]\u001b[92m16:39:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end 
generation.\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2169/7340 [73:31<175:17, 29.5 steps/min]\u001b[92m16:39:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:39:50,735 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:39:50,735 - agent.ComputerAgent - INFO - Computer: click({'x': 256, 'y': 173})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 256, 'y': 173})\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/71840850-9565-4ed2-8fa2-e4f2ba6ec6a9/invoke \"HTTP/1.1 200 OK\"\n", - " 30%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------------| 2169/7340 [73:32<175:20, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:39:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. 
You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00> ~/.vimrc && echo Done\\''})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'bash -lc \\'printf \"\\\\n\\\\\" Enable absolute line numbers by default\\\\nset number\\\\n\" >> ~/.vimrc && echo Done\\''})\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2276/7340 [77:50<173:11, 29.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7e0b0038-3a97-4d93-8c5c-154cc0b95af9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/74442f45-62aa-40d1-9499-ea3e8e0a4c18/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:10,219 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:44:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a8ddfa4f-6f9b-4ad8-b763-1881394c9926/invoke \"HTTP/1.1 200 OK\"\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2277/7340 [77:52<173:08, 29.2 steps/min]2025-08-11 16:44:10,891 - agent.ComputerAgent - INFO - LLM processing started with 16 
messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:44:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:11,562 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:44:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2277/7340 [77:53<173:11, 29.2 steps/min]2025-08-11 16:44:12,212 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:44:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:14,267 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/475030e1-7ae3-4ced-91fb-2221d956a2ad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "\u001b[92m16:44:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:16,173 - agent.ComputerAgent - INFO - Agent: The heading is now center-aligned and the document has been saved. Task completed.\n", - "INFO:agent.ComputerAgent:Agent: The heading is now center-aligned and the document has been saved. Task completed.\n", - "2025-08-11 16:44:16,851 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 218\n", - " - prompt_tokens: 4658\n", - " - total_tokens: 4876\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 192\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0080\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 218\n", - " - prompt_tokens: 4658\n", - " - total_tokens: 4876\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 192\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0080\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2279/7340 [77:58<173:09, 29.2 steps/min]2025-08-11 16:44:17,521 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:44:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:18,174 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:44:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:44:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:19,530 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:44:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2279/7340 [78:01<173:17, 29.2 steps/min]\u001b[92m16:44:20 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:20,843 - agent.ComputerAgent - INFO - Computer: click({'x': 1008, 'y': 193})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1008, 'y': 193})\n", - "\u001b[92m16:44:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:44:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:22,141 - agent.ComputerAgent - INFO - Computer: click({'x': 776, 'y': 643})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 776, 'y': 643})\n", - "2025-08-11 16:44:22,791 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2279/7340 [78:04<173:22, 29.2 steps/min]\u001b[92m16:44:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:44:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:44:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:44:23,485 - agent.ComputerAgent - INFO - Computer: click({'x': 977, 'y': 16})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 977, 'y': 16})\n", - "2025-08-11 16:44:24,132 - agent.ComputerAgent - INFO - Computer: click({'x': 111, 'y': 162})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 111, 'y': 162})\n", - "\u001b[92m16:44:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2281/7340 [78:05<173:12, 29.2 steps/min]2025-08-11 16:44:24,809 - agent.ComputerAgent - INFO - Computer: click({'x': 625, 'y': 248})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 625, 'y': 248})\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2283/7340 [78:06<173:01, 29.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ed3b76a0-7eeb-473c-8da1-310545515f47/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:26,462 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m16:44:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5180ec6f-26a5-4ab4-8ca3-87f128083da1/invoke \"HTTP/1.1 
200 OK\"\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2284/7340 [78:08<172:58, 29.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:28,788 - agent.ComputerAgent - INFO - Computer: type({'text': 'none.png'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'none.png'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2284/7340 [78:11<173:06, 29.2 steps/min]\u001b[92m16:44:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:44:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ea9e43cc-3d54-4c89-bb53-a189a3ae9a25/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ff8b808f-c3a6-4979-8f9a-c6a25905116c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a46ee6f6-d167-47c4-ad83-e16b88450253/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a39ee9df-d3ba-456a-95cf-3a11a826583b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:32,685 - agent.ComputerAgent - INFO - Computer: type({'text': 'pkill -f soffice.bin\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'pkill -f soffice.bin\\n'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:33,322 - 
agent.ComputerAgent - INFO - Computer: click({'x': 982, 'y': 632})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 982, 'y': 632})\n", - "\u001b[92m16:44:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:44:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:44:34,677 - agent.ComputerAgent - INFO - Agent: All .jpg files from the photos directory (recursively) have been copied into cpjpg on the Desktop. Verified: 4 .jpg files found and 4 copied.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: All .jpg files from the photos directory (recursively) have been copied into cpjpg on the Desktop. 
Verified: 4 .jpg files found and 4 copied.\n", - "\n", - "Task completed\n", - "2025-08-11 16:44:35,306 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 494\n", - " - prompt_tokens: 8268\n", - " - total_tokens: 8762\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0153\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 494\n", - " - prompt_tokens: 8268\n", - " - total_tokens: 8762\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0153\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2287/7340 [78:17<172:59, 29.2 steps/min]2025-08-11 16:44:36,616 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 666, 'scroll_x': 0, 'x': 336, 'y': 152})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 666, 'scroll_x': 0, 'x': 336, 'y': 152})\n", - "2025-08-11 16:44:37,280 - agent.ComputerAgent - INFO - Computer: click({'x': 520, 'y': 437})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 520, 'y': 437})\n", - "\u001b[92m16:44:37 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:44:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:44:37,941 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:44:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:38,620 - agent.ComputerAgent - INFO - Computer: click({'x': 514, 'y': 304})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 514, 'y': 304})\n", - "2025-08-11 16:44:39,286 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -200, 'scroll_x': 0, 'x': 589, 'y': 128})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -200, 'scroll_x': 0, 'x': 589, 'y': 128})\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2289/7340 [78:21<172:53, 29.2 steps/min]\u001b[92m16:44:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:39,956 - agent.ComputerAgent - INFO - Computer: click({'x': 351, 'y': 153})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 351, 'y': 153})\n", - "2025-08-11 16:44:40,591 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:44:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:41,646 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:44:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2293/7340 [78:23<172:32, 29.3 steps/min]2025-08-11 16:44:42,285 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:44:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:42,973 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:44:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:44,693 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2294/7340 [78:26<172:32, 29.2 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/ed3b76a0-7eeb-473c-8da1-310545515f47/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fd628f34-1346-4947-bfa4-cf698adb3472/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:45,371 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m16:44:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5180ec6f-26a5-4ab4-8ca3-87f128083da1/invoke \"HTTP/1.1 200 OK\"\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2311/7340 [78:27<170:43, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5180ec6f-26a5-4ab4-8ca3-87f128083da1/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fd628f34-1346-4947-bfa4-cf698adb3472/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2c254802-788e-4b4b-98dc-68cd2c6bcce4/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/74442f45-62aa-40d1-9499-ea3e8e0a4c18/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d4054e85-5304-43a3-b6d7-128e302780cb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/7e0b0038-3a97-4d93-8c5c-154cc0b95af9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - " 31%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2311/7340 [78:30<170:49, 29.4 steps/min]2025-08-11 16:44:49,145 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:44:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:44:49,827 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:44:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:50,495 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:44:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fd628f34-1346-4947-bfa4-cf698adb3472/close \"HTTP/1.1 200 OK\"\n", - " 32%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2321/7340 [78:32<169:49, 29.6 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a8ddfa4f-6f9b-4ad8-b763-1881394c9926/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/475030e1-7ae3-4ced-91fb-2221d956a2ad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 32%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2322/7340 [78:33<169:45, 29.6 steps/min]2025-08-11 16:44:53,297 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:44:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:44:53,963 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:44:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:44:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 32%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------------| 2322/7340 [78:36<169:52, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:44:55,334 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:44:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:accelerate.utils.modeling:We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk).\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 ~/vim_test.txt && vim ~/vim_test.txt\\''})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'bash -lc \\'printf \"line a\\\\nline b\\\\nline c\\\\n\" > ~/vim_test.txt && vim ~/vim_test.txt\\''})\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2397/7340 [80:43<166:27, 29.7 steps/min]\u001b[92m16:47:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:02,258 - agent.ComputerAgent - INFO - Computer: click({'x': 855, 'y': 476})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 855, 'y': 476})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2399/7340 [80:44<166:18, 29.7 steps/min]\u001b[92m16:47:03 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:03,544 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 604, 'scroll_x': 0, 'x': 307, 'y': 666})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 604, 'scroll_x': 0, 'x': 307, 'y': 666})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2401/7340 [80:46<166:09, 29.7 steps/min]\u001b[92m16:47:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:47:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:05,490 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer 
screen.\n", - "2025-08-11 16:47:05,491 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 465, 'y': 294})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 465, 'y': 294})\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2402/7340 [80:47<166:05, 29.7 steps/min]\u001b[92m16:47:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:06,659 - agent.ComputerAgent - INFO - Computer: click({'x': 637, 'y': 471})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 637, 'y': 471})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:07,327 - agent.ComputerAgent - INFO - LLM processing started with 31 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 31 messages\n", - "\u001b[92m16:47:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2403/7340 [80:49<166:02, 29.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:08,385 - agent.ComputerAgent - INFO - Computer: click({'x': 111, 'y': 270})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 111, 'y': 270})\n", - 
"INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/475030e1-7ae3-4ced-91fb-2221d956a2ad/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2404/7340 [80:50<165:58, 29.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:09,003 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m16:47:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6fcb07bb-6857-4888-82a0-1fd0dbf2d722/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:09,698 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 530, 'scroll_x': 0, 'x': 574, 'y': 736})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 530, 'scroll_x': 0, 'x': 574, 'y': 736})\n", - "2025-08-11 16:47:10,366 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:47:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions 
\"HTTP/1.1 400 Bad Request\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2406/7340 [80:52<165:50, 29.8 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:11,392 - agent.ComputerAgent - INFO - Computer: click({'x': 1008, 'y': 164})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1008, 'y': 164})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d4054e85-5304-43a3-b6d7-128e302780cb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2407/7340 [80:53<165:46, 29.8 steps/min]2025-08-11 16:47:12,011 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:47:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:12,685 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:47:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2408/7340 [80:55<165:44, 29.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:14,028 - agent.ComputerAgent - INFO - LLM processing started with 33 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 33 messages\n", - "\u001b[92m16:47:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7e0b0038-3a97-4d93-8c5c-154cc0b95af9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:14,693 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:47:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2408/7340 [80:57<165:48, 29.7 steps/min]\u001b[92m16:47:15 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:47:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a46ee6f6-d167-47c4-ad83-e16b88450253/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:16,409 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 75})\n", - "2025-08-11 16:47:17,070 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:47:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:18,388 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "\u001b[92m16:47:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2409/7340 [81:00<165:48, 29.7 steps/min]2025-08-11 16:47:19,055 - agent.ComputerAgent - INFO - Computer: click({'x': 458, 'y': 275})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 458, 'y': 275})\n", - "2025-08-11 16:47:19,739 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:47:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2410/7340 [81:01<165:44, 29.7 steps/min]2025-08-11 16:47:20,387 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:47:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:21,068 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m16:47:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2411/7340 [81:02<165:41, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ff8b808f-c3a6-4979-8f9a-c6a25905116c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:22,362 - agent.ComputerAgent - INFO - LLM processing started with 35 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 35 messages\n", - "\u001b[92m16:47:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:23,717 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2411/7340 [81:05<165:46, 29.7 steps/min]\u001b[92m16:47:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:24,730 - agent.ComputerAgent - INFO - Computer: double_click({'x': 331, 'y': 111})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 331, 'y': 111})\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2412/7340 [81:06<165:42, 29.7 steps/min]2025-08-11 16:47:25,410 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m16:47:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2c254802-788e-4b4b-98dc-68cd2c6bcce4/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2413/7340 [81:07<165:38, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:26,610 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:47:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:27,269 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:47:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2413/7340 [81:09<165:43, 29.7 steps/min]\u001b[92m16:47:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "\u001b[92m16:47:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:29,307 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:47:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2414/7340 [81:11<165:39, 29.7 steps/min]\u001b[92m16:47:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:30,494 - agent.ComputerAgent - INFO - Computer: click({'x': 946, 'y': 738})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 946, 'y': 738})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/475030e1-7ae3-4ced-91fb-2221d956a2ad/invoke \"HTTP/1.1 200 OK\"\n", - " 
33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2414/7340 [81:12<165:42, 29.7 steps/min]2025-08-11 16:47:31,166 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:47:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:31,814 - agent.ComputerAgent - INFO - LLM processing started with 37 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 37 messages\n", - "\u001b[92m16:47:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6fcb07bb-6857-4888-82a0-1fd0dbf2d722/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2415/7340 [81:13<165:39, 29.7 steps/min]2025-08-11 16:47:32,487 - agent.ComputerAgent - INFO - Computer: click({'x': 351, 'y': 294})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 351, 'y': 294})\n", - "2025-08-11 16:47:33,177 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:47:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2415/7340 [81:14<165:41, 29.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2416/7340 [81:15<165:37, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2416/7340 [81:16<165:39, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2416/7340 [81:17<165:41, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2417/7340 [81:19<165:37, 29.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:38,035 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m16:47:38 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c3518cd0-0df6-44e9-8393-0c62002bc984/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:38,680 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:47:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2417/7340 [81:20<165:40, 29.7 steps/min]2025-08-11 16:47:39,336 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:47:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "\u001b[92m16:47:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:41,078 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:47:41,079 - agent.ComputerAgent - INFO - Computer: click({'x': 92, 'y': 359})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 92, 'y': 359})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2417/7340 [81:23<165:46, 29.7 steps/min]\u001b[92m16:47:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2418/7340 [81:24<165:42, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:47:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:47:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "2025-08-11 16:47:44,087 - agent.ComputerAgent - INFO - Computer: click({'x': 982, 'y': 760})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 982, 'y': 760})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2419/7340 [81:25<165:39, 29.7 steps/min]2025-08-11 16:47:44,730 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:47:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:47:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:46,038 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 654, 'scroll_x': 0, 'x': 283, 'y': 664})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 654, 'scroll_x': 0, 'x': 283, 'y': 664})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2420/7340 [81:28<165:38, 29.7 steps/min]\u001b[92m16:47:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:47:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:47,983 - agent.ComputerAgent - INFO - Computer: click({'x': 585, 'y': 355})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 585, 'y': 355})\n", - "\u001b[92m16:47:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2421/7340 [81:29<165:34, 29.7 steps/min]2025-08-11 16:47:48,672 - agent.ComputerAgent - INFO - Computer: click({'x': 962, 'y': 234})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 962, 'y': 
234})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:49,341 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:47:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:47:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2422/7340 [81:31<165:31, 29.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:50,011 - agent.ComputerAgent - INFO - Computer: click({'x': 392, 'y': 275})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 392, 'y': 275})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:51,361 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:47:51,362 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'super'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'super'})\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2423/7340 [81:33<165:29, 29.7 steps/min]\u001b[92m16:47:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "2025-08-11 16:47:52,058 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m16:47:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:52,742 - agent.ComputerAgent - INFO - Computer: click({'x': 196, 'y': 237})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 196, 'y': 237})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2425/7340 [81:35<165:21, 29.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2426/7340 [81:36<165:17, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7e0b0038-3a97-4d93-8c5c-154cc0b95af9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:47:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:47:55,312 - agent.ComputerAgent - INFO - Computer: click({'x': 351, 'y': 294})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 351, 'y': 294})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a39ee9df-d3ba-456a-95cf-3a11a826583b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "2025-08-11 16:47:55,965 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:47:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:47:57,620 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d4054e85-5304-43a3-b6d7-128e302780cb/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2427/7340 [81:39<165:17, 29.7 steps/min]\u001b[92m16:47:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:47:58,279 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 
messages\n", - "\u001b[92m16:47:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:47:58,928 - agent.ComputerAgent - INFO - Computer: click({'x': 316, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 316, 'y': 101})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2429/7340 [81:40<165:08, 29.7 steps/min]2025-08-11 16:47:59,585 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:47:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:48:00,942 - agent.ComputerAgent - INFO - Computer: type({'text': ':q'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': ':q'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:48:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:48:02,299 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m16:48:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/6fcb07bb-6857-4888-82a0-1fd0dbf2d722/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2430/7340 [81:44<165:09, 29.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:48:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2431/7340 [81:45<165:04, 29.7 steps/min]2025-08-11 16:48:03,692 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:48:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:48:04,331 - agent.ComputerAgent - INFO - LLM processing started with 43 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 43 messages\n", - "\u001b[92m16:48:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2431/7340 [81:46<165:07, 29.7 steps/min]\u001b[92m16:48:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:48:04,993 - agent.ComputerAgent - INFO - Computer: click({'x': 458, 'y': 422})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 458, 'y': 422})\n", - "2025-08-11 16:48:05,658 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:48:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d4054e85-5304-43a3-b6d7-128e302780cb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:48:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2431/7340 [81:47<165:09, 29.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:48:07,064 - agent.ComputerAgent - INFO - Computer: click({'x': 474, 'y': 332})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 474, 'y': 332})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2436/7340 [81:48<164:42, 29.8 steps/min]2025-08-11 16:48:07,712 - agent.ComputerAgent - INFO - LLM processing started 
with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:48:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/475030e1-7ae3-4ced-91fb-2221d956a2ad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:48:08,382 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:48:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a46ee6f6-d167-47c4-ad83-e16b88450253/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d4054e85-5304-43a3-b6d7-128e302780cb/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:50<164:32, 29.8 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:48:09,668 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m16:48:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET 
https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:51<164:35, 29.8 steps/min]2025-08-11 16:48:10,320 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:48:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:52<164:37, 29.8 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/reset \"HTTP/1.1 200 OK\"\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:53<164:39, 29.8 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ff8b808f-c3a6-4979-8f9a-c6a25905116c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:48:12,541 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:48:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:48:13,223 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - 
"\u001b[92m16:48:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:54<164:42, 29.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:48:14,391 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:48:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2438/7340 [81:56<164:44, 29.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:48:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 33%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2440/7340 [81:57<164:35, 29.8 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3e4ea7d7-21a2-4b07-abd4-a3e280e44e0b/close \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:005'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'WEEKDAY(B3;2)>5'})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2477/7340 [83:44<164:25, 29.6 
steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/422a20c8-b318-46e4-9f06-d599c9ed261c/reset \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:50:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2479/7340 [83:46<164:15, 29.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:05,091 - agent.ComputerAgent - INFO - Computer: click({'x': 351, 'y': 294})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 351, 'y': 294})\n", - "\u001b[92m16:50:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"\u001b[92m16:50:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:05,754 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m16:50:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2479/7340 [83:48<164:19, 29.6 steps/min]\u001b[92m16:50:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:50:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:07,118 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 471, 'y': 328}, {'x': 351, 'y': 709}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 471, 'y': 328}, {'x': 351, 'y': 709}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:50:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/422a20c8-b318-46e4-9f06-d599c9ed261c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:08,416 - agent.ComputerAgent - INFO - Computer: click({'x': 268, 'y': 188})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 268, 'y': 188})\n", - "2025-08-11 16:50:09,045 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:50:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2480/7340 [83:50<164:18, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:09,730 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:50:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:50:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:10,446 - agent.ComputerAgent - INFO - Computer: double_click({'x': 618, 'y': 483})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 618, 'y': 483})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2482/7340 [83:52<164:09, 29.6 steps/min]2025-08-11 16:50:11,451 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:50:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f73836c4-d8e3-425b-a750-f2319c89164e/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [83:53<164:05, 29.6 steps/min]2025-08-11 16:50:12,123 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:50:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:13,563 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:50:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [83:55<164:09, 29.6 
steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/514e0362-c0b3-4216-989f-d260ec405efb/reset \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [83:56<164:11, 29.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:15,740 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:50:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [83:57<164:13, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:17,970 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:50:17,970 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'win+e'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'win+e'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/514e0362-c0b3-4216-989f-d260ec405efb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a39ee9df-d3ba-456a-95cf-3a11a826583b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP 
Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [84:00<164:19, 29.6 steps/min]\u001b[92m16:50:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:19,965 - agent.ComputerAgent - INFO - Computer: type({'text': 'Orchis theme gnome-look'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Orchis theme gnome-look'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:21,285 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:50:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:50:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2483/7340 [84:03<164:25, 29.5 steps/min]\u001b[92m16:50:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:22,607 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:50:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:23,283 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 626, 'scroll_x': 0, 'x': 588, 'y': 446})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 626, 'scroll_x': 0, 'x': 588, 'y': 446})\n", - "\u001b[92m16:50:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/daac505f-9423-4b29-b11c-9b23c5c9e3ee/reset \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:23,909 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current 
computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:50:23,909 - agent.ComputerAgent - INFO - Computer: double_click({'x': 989, 'y': 713})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 989, 'y': 713})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2484/7340 [84:05<164:23, 29.5 steps/min]2025-08-11 16:50:24,578 - agent.ComputerAgent - INFO - Computer: click({'x': 412, 'y': 128})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 412, 'y': 128})\n", - "2025-08-11 16:50:25,256 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:50:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2486/7340 [84:07<164:14, 29.6 steps/min]2025-08-11 16:50:25,924 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m16:50:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:26,594 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:50:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 
34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2487/7340 [84:09<164:12, 29.6 steps/min]\u001b[92m16:50:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:50:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:28,422 - agent.ComputerAgent - INFO - Computer: double_click({'x': 960, 'y': 713})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 960, 'y': 713})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:29,722 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'win'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'win'})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2487/7340 [84:11<164:17, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/daac505f-9423-4b29-b11c-9b23c5c9e3ee/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:30,365 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m16:50:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fed9747f-6005-4d29-b83e-afc7934c0ff5/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2489/7340 [84:13<164:09, 29.6 steps/min]\u001b[92m16:50:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2b43eb21-4025-495a-8c66-358bfcac034b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:50:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:33,742 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'esc'})\n", - 
"INFO:agent.ComputerAgent:Computer: keypress({'keys': 'esc'})\n", - "\u001b[92m16:50:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:50:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2489/7340 [84:16<164:14, 29.5 steps/min]\u001b[92m16:50:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:35,048 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:50:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:35,696 - agent.ComputerAgent - INFO - Computer: move({'x': 887, 'y': 167})\n", - "INFO:agent.ComputerAgent:Computer: move({'x': 887, 'y': 167})\n", - "2025-08-11 16:50:36,379 - agent.ComputerAgent - INFO - Computer: click({'x': 260, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 260, 'y': 101})\n", - "\u001b[92m16:50:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:37,041 - agent.ComputerAgent - INFO - Computer: click({'x': 537, 'y': 304})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 537, 'y': 304})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:38,373 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'right'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'right'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f73836c4-d8e3-425b-a750-f2319c89164e/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/422a20c8-b318-46e4-9f06-d599c9ed261c/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2490/7340 [84:20<164:16, 29.5 steps/min]\u001b[92m16:50:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:39,696 - agent.ComputerAgent - INFO - Computer: type({'text': 'Mumbai'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Mumbai'})\n", - "2025-08-11 16:50:40,392 - agent.ComputerAgent - INFO - Computer: click({'x': 746, 'y': 651})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 746, 'y': 651})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2494/7340 [84:22<163:56, 29.6 steps/min]2025-08-11 16:50:41,039 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 8 messages\n", - "\u001b[92m16:50:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a46ee6f6-d167-47c4-ad83-e16b88450253/invoke \"HTTP/1.1 502 Bad Gateway\"\n", - "2025-08-11 16:50:41,701 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:50:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:42,355 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:50:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:24<163:47, 29.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:43,043 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:50:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:25<163:49, 29.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:50:45,382 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+home'})\n", - 
"INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+home'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0803e2c2-9de2-40ff-93da-cb49f156cbba/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ff8b808f-c3a6-4979-8f9a-c6a25905116c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:27<163:53, 29.6 steps/min]2025-08-11 16:50:46,040 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:50:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:46,708 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m16:50:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:47,380 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m16:50:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:29<163:57, 29.5 steps/min]2025-08-11 16:50:48,016 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:50:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:49,339 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:50:49,340 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a46ee6f6-d167-47c4-ad83-e16b88450253/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6195bb79-4eff-4d3b-8b67-f28a4e6a73fa/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:31<164:01, 29.5 steps/min]2025-08-11 16:50:49,998 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:50:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:51,356 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:50:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:34<164:08, 29.5 steps/min]\u001b[92m16:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m16:50:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:54,063 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 398, 'y': 89})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 398, 'y': 89})\n", - "\u001b[92m16:50:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:50:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:54,739 - agent.ComputerAgent - INFO - Computer: double_click({'x': 960, 'y': 713})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 960, 'y': 713})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2496/7340 [84:36<164:11, 29.5 steps/min]2025-08-11 16:50:55,391 - agent.ComputerAgent - INFO - Computer: click({'x': 793, 'y': 41})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 793, 'y': 41})\n", - "\u001b[92m16:50:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:56,056 - agent.ComputerAgent - INFO - LLM processing started with 42 
messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m16:50:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:50:56,694 - agent.ComputerAgent - INFO - Computer: click({'x': 17, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 17, 'y': 427})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2498/7340 [84:39<164:06, 29.5 steps/min]\u001b[92m16:50:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:50:58,732 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m16:50:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:50:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", 
- "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:50:59,814 - agent.ComputerAgent - INFO - Computer: click({'x': 318, 'y': 237})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 318, 'y': 237})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:50:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:51:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:51:02,000 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'right'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'right'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2500/7340 [84:43<164:02, 29.5 steps/min]2025-08-11 16:51:02,679 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 16:51:02,680 - agent.ComputerAgent - INFO - Computer: click({'x': 95, 'y': 185})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 95, 'y': 185})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:51:04,082 
- agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+home'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+home'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:51:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:51:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2502/7340 [84:46<163:55, 29.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:51:05,471 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:51:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m16:51:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m16:51:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 16:51:06,153 - agent.ComputerAgent - INFO - Computer: 
double_click({'x': 615, 'y': 483})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 615, 'y': 483})\n", - "\u001b[92m16:51:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2503/7340 [84:47<163:52, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 16:51:06,824 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 483, 'y': 328}, {'x': 411, 'y': 711}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 483, 'y': 328}, {'x': 411, 'y': 711}]})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2504/7340 [84:48<163:48, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/029e3386-8f59-44a0-9dba-b14a4d9825ef/close \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:49<163:44, 29.5 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:50<163:46, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e5b48f23-7687-494c-b68c-ebdfc70d085f/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:51<163:48, 29.5 steps/min]2025-08-11 16:51:11,251 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m16:51:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/2b43eb21-4025-495a-8c66-358bfcac034b/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:53<163:50, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/daac505f-9423-4b29-b11c-9b23c5c9e3ee/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f73836c4-d8e3-425b-a750-f2319c89164e/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/81b23870-39ed-4649-9729-1d4809f713ec/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/422a20c8-b318-46e4-9f06-d599c9ed261c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1473c3f2-39e1-4aff-8d55-0e23dc25a055/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:51:11,946 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m16:51:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:51:12,584 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:51:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/e2ffab0a-c998-4bbf-906b-d3aad0586220/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a39ee9df-d3ba-456a-95cf-3a11a826583b/invoke \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:54<163:52, 29.5 steps/min]2025-08-11 16:51:13,334 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m16:51:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:51:13,968 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m16:51:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:55<163:55, 29.5 steps/min]2025-08-11 16:51:14,650 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m16:51:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:51:15,336 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m16:51:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:57<163:58, 29.5 steps/min]2025-08-11 16:51:15,998 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m16:51:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 16:51:16,677 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m16:51:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:58<164:00, 29.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [84:59<164:02, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/c3518cd0-0df6-44e9-8393-0c62002bc984/reset \"HTTP/1.1 200 OK\"\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [85:00<164:04, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:51:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 16:51:20,733 - agent.ComputerAgent - INFO - Computer: type({'text': 'Stockholm'})\n", - 
"INFO:agent.ComputerAgent:Computer: type({'text': 'Stockholm'})\n", - " 34%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------------| 2505/7340 [85:02<164:08, 29.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m16:51:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Slide Pane to toggle it back on.\n", - "- You can resize it by dragging its right edge. If you lose the right-side Properties panel, use View > Sidebar.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: I restored the Slides panel on the left in your LibreOffice Impress.\n", - "\n", - "If it disappears again:\n", - "- Go to the top menu: View > Slide Pane to toggle it back on.\n", - "- You can resize it by dragging its right edge. 
If you lose the right-side Properties panel, use View > Sidebar.\n", - "\n", - "Task completed\n", - "2025-08-11 17:05:27,005 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 712\n", - " - prompt_tokens: 7355\n", - " - total_tokens: 8067\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 640\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0163\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 712\n", - " - prompt_tokens: 7355\n", - " - total_tokens: 8067\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 640\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0163\n", - " 38%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------------------------| 2809/7340 [99:08<159:55, 28.3 steps/min]\u001b[92m17:05:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f141a0f-f4b0-4f99-b4c4-5217b268c96b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:05:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/025be48d-d757-4973-8c17-e42b8f6814b0/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d2c8050e-87aa-487a-9555-884be2298ade/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:05:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:05:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:05:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:05:30,349 - agent.ComputerAgent - INFO - Computer: click({'x': 87, 'y': 274})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 87, 'y': 274})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b6fc8c3-534a-4e7d-9a9b-4c6bad0e0619/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Appearance > Mode = Light\n", - "\n", - "This forces Chrome to use a light theme regardless of your device theme.\n", - "\n", - "If any websites still appear dark, it could be due to:\n", - "- A dark-mode extension (e.g., Dark Reader). 
Disable it under Extensions.\n", - "- The experimental flag β€œAuto Dark Mode for Web Contents” being enabled. Visit chrome://flags and ensure it’s Disabled.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I’ve turned off Dark Mode in Google Chrome by setting the browser’s appearance to Light:\n", - "- Settings > Appearance > Mode = Light\n", - "\n", - "This forces Chrome to use a light theme regardless of your device theme.\n", - "\n", - "If any websites still appear dark, it could be due to:\n", - "- A dark-mode extension (e.g., Dark Reader). Disable it under Extensions.\n", - "- The experimental flag β€œAuto Dark Mode for Web Contents” being enabled. Visit chrome://flags and ensure it’s Disabled.\n", - "\n", - "Task completed.\n", - "2025-08-11 17:10:01,595 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 876\n", - " - prompt_tokens: 8770\n", - " - total_tokens: 9646\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 768\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0197\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 876\n", - " - prompt_tokens: 8770\n", - " - total_tokens: 9646\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 768\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0197\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:10:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "2025-08-11 17:10:02,937 - agent.ComputerAgent - INFO - Computer: click({'x': 400, 'y': 77})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 400, 'y': 77})\n", - "2025-08-11 17:10:03,606 - agent.ComputerAgent - INFO - Computer: double_click({'x': 476, 'y': 392})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 476, 'y': 392})\n", - "\u001b[92m17:10:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:10:04,225 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - " 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2945/7340 [103:45<154:51, 28.4 steps/min]\u001b[92m17:10:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:10:04,897 - agent.ComputerAgent - INFO - Computer: click({'x': 986, 'y': 578})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 986, 'y': 578})\n", - "\u001b[92m17:10:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:10:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 
40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2948/7340 [103:46<154:37, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:10:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:10:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:10:06,706 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 143, 'y': 188}, {'x': 147, 'y': 229}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 143, 'y': 188}, {'x': 147, 'y': 229}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:10:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:10:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f141a0f-f4b0-4f99-b4c4-5217b268c96b/invoke \"HTTP/1.1 200 OK\"\n", - " 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2949/7340 [103:49<154:35, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:10:08,028 - agent.ComputerAgent - INFO - Computer: 
double_click({'x': 730, 'y': 648})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 730, 'y': 648})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2950/7340 [103:50<154:31, 28.4 steps/min]\u001b[92m17:10:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:10:09,210 - agent.ComputerAgent - INFO - Computer: click({'x': 982, 'y': 167})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 982, 'y': 167})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f141a0f-f4b0-4f99-b4c4-5217b268c96b/invoke \"HTTP/1.1 200 OK\"\n", - " 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2960/7340 [103:51<153:40, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:10:10,343 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:10:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b6fc8c3-534a-4e7d-9a9b-4c6bad0e0619/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f141a0f-f4b0-4f99-b4c4-5217b268c96b/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1f48e361-2592-41ee-8818-d6e9174fe800/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:10:11,005 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:10:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/797f1798-0199-4d66-a503-1c5a8d488911/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:10:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/88706cb5-896e-4bf5-8b52-5df252945e00/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 2961/7340 [103:54<153:39, 28.5 steps/min]2025-08-11 17:10:12,983 - agent.ComputerAgent - INFO - LLM 
processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:10:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:10:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ecc77db9-1735-4a1a-ab51-39c4517489fb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6b818afb-eeae-43f9-b1ff-7f82844997e2/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Options > LibreOffice Impress > General: unchecked β€œEnable Presenter Console,” then applied and saved.\n", - "\n", - "How to choose which single monitor shows the slideshow (do this once per machine/setup):\n", - "- Slide Show > Slide Show Settings…\n", - "- Under Multiple displays, set β€œPresentation display” to the monitor you want (Display 1 or Display 2) and keep Presentation mode as β€œFull screen.”\n", - "- Click OK.\n", - "\n", - "Result:\n", - "- When you start the slideshow, it will fill only the selected monitor.\n", - "- The second monitor will remain free for other work, and the β€œCurrent/Next slide” presenter view will no longer appear.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I configured LibreOffice Impress to use only one monitor for the slideshow and to disable the Presenter Console.\n", - "\n", - "What I changed:\n", - "- Tools > Options > LibreOffice Impress > General: unchecked β€œEnable 
Presenter Console,” then applied and saved.\n", - "\n", - "How to choose which single monitor shows the slideshow (do this once per machine/setup):\n", - "- Slide Show > Slide Show Settings…\n", - "- Under Multiple displays, set β€œPresentation display” to the monitor you want (Display 1 or Display 2) and keep Presentation mode as β€œFull screen.”\n", - "- Click OK.\n", - "\n", - "Result:\n", - "- When you start the slideshow, it will fill only the selected monitor.\n", - "- The second monitor will remain free for other work, and the β€œCurrent/Next slide” presenter view will no longer appear.\n", - "\n", - "Task completed.\n", - "2025-08-11 17:13:46,540 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 1315\n", - " - prompt_tokens: 13574\n", - " - total_tokens: 14889\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1152\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 9472\n", - " - response_cost: $0.0195\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 1315\n", - " - prompt_tokens: 13574\n", - " - total_tokens: 14889\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1152\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 9472\n", - " - response_cost: $0.0195\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3054/7340 [107:28<150:49, 28.4 steps/min]2025-08-11 17:13:47,229 - agent.ComputerAgent - INFO - Computer: click({'x': 969, 'y': 218})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 969, 'y': 218})\n", - "2025-08-11 17:13:47,886 - agent.ComputerAgent - INFO - Computer: double_click({'x': 205, 'y': 214})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 
205, 'y': 214})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:13:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:13:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:13:50,419 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+alt+t'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+alt+t'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3055/7340 [107:32<150:50, 28.4 steps/min]\u001b[92m17:13:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:13:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:13:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:13:51,700 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:13:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:13:52,387 - agent.ComputerAgent - INFO - Computer: click({'x': 954, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 954, 'y': 232})\n", - "2025-08-11 17:13:53,035 - agent.ComputerAgent - INFO - Computer: click({'x': 589, 'y': 143})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 589, 'y': 143})\n", - "\u001b[92m17:13:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3057/7340 [107:34<150:43, 28.4 steps/min]2025-08-11 17:13:53,673 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 660, 'scroll_x': 0, 'x': 658, 'y': 467})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 660, 'scroll_x': 0, 'x': 658, 'y': 467})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3059/7340 [107:35<150:34, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1f48e361-2592-41ee-8818-d6e9174fe800/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b6fc8c3-534a-4e7d-9a9b-4c6bad0e0619/invoke \"HTTP/1.1 200 
OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:13:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3060/7340 [107:37<150:31, 28.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:13:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:13:56,551 - agent.ComputerAgent - INFO - Computer: click({'x': 660, 'y': 104})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 660, 'y': 104})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1f48e361-2592-41ee-8818-d6e9174fe800/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3060/7340 [107:38<150:33, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1f48e361-2592-41ee-8818-d6e9174fe800/close \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3063/7340 [107:39<150:19, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:13:58,853 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM 
processing started with 32 messages\n", - "\u001b[92m17:13:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3063/7340 [107:40<150:21, 28.4 steps/min]2025-08-11 17:13:59,510 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m17:13:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:00,189 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:14:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:00,831 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:14:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3063/7340 [107:42<150:23, 28.4 steps/min]2025-08-11 17:14:01,477 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m17:14:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:02,169 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m17:14:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3063/7340 [107:43<150:25, 28.4 steps/min]2025-08-11 17:14:02,841 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:14:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 
17:14:03,496 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:14:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3063/7340 [107:45<150:27, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:14:06,224 - agent.ComputerAgent - INFO - Computer: type({'text': 'conda create -n hf python=3.11 --override-channels -c conda-forge -y\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'conda create -n hf python=3.11 --override-channels -c conda-forge -y\\n'})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3064/7340 [107:53<150:33, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.58s/it]2025-08-11 17:14:12,505 - agent.ComputerAgent - 
INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:14:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3064/7340 [107:55<150:36, 28.4 steps/min]\u001b[92m17:14:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.32s/it]\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:14:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:14:15,999 - agent.ComputerAgent - INFO - Computer: type({'text': 'spider-man toys kids'})\n", - 
"INFO:agent.ComputerAgent:Computer: type({'text': 'spider-man toys kids'})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3064/7340 [107:57<150:40, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:14:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:14:17,286 - agent.ComputerAgent - INFO - Computer: click({'button': 'right', 'x': 512, 'y': 384})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'right', 'x': 512, 'y': 384})\n", - "\u001b[92m17:14:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:14:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:14:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:14:17,916 - agent.ComputerAgent - INFO - Computer: click({'x': 175, 'y': 183})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 175, 'y': 183})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3065/7340 [107:59<150:37, 28.4 steps/min]2025-08-11 17:14:18,581 - agent.ComputerAgent - INFO - Computer: click({'x': 730, 'y': 275})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 730, 'y': 275})\n", - "2025-08-11 17:14:19,257 - agent.ComputerAgent - INFO - Computer: click({'x': 125, 'y': 182})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 125, 'y': 182})\n", - "\u001b[92m17:14:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:14:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:14:19,937 - agent.ComputerAgent - INFO - Computer: click({'x': 184, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 184, 'y': 178})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3067/7340 [108:02<150:31, 28.4 steps/min]2025-08-11 17:14:21,216 - agent.ComputerAgent - INFO - Computer: double_click({'x': 757, 'y': 644})\n", - 
"INFO:agent.ComputerAgent:Computer: double_click({'x': 757, 'y': 644})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:14:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3070/7340 [108:04<150:19, 28.4 steps/min]\u001b[92m17:14:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:14:23,849 - agent.ComputerAgent - INFO - Computer: click({'x': 318, 'y': 59})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 318, 'y': 59})\n", - "\u001b[92m17:14:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:14:25,159 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "2025-08-11 17:14:25,792 - agent.ComputerAgent - INFO - Computer: click({'x': 910, 'y': 254})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 910, 'y': 254})\n", - "\u001b[92m17:14:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/reset \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3071/7340 [108:07<150:18, 28.4 steps/min]2025-08-11 17:14:26,480 - agent.ComputerAgent - INFO - Computer: click({'x': 652, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 652, 'y': 178})\n", - "2025-08-11 17:14:27,163 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 654, 'scroll_x': 0, 'x': 654, 'y': 467})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 654, 'scroll_x': 0, 'x': 654, 'y': 467})\n", - " 
42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3076/7340 [108:09<149:56, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b6fc8c3-534a-4e7d-9a9b-4c6bad0e0619/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:11<149:49, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b6fc8c3-534a-4e7d-9a9b-4c6bad0e0619/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:13<149:50, 28.4 steps/min]2025-08-11 17:14:32,090 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m17:14:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4b18a76d-ef46-4622-9643-9ee6fe4900a3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055e9f8b-8c01-4732-8b5f-ef4fc732f122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/35bb6fb7-5b34-473c-a541-13215a694bc6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:14:32,771 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m17:14:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:14<149:52, 28.4 steps/min]2025-08-11 17:14:33,404 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 30 messages\n", - "\u001b[92m17:14:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:34,070 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m17:14:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:34,696 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:14:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:35,372 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:14:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:36,032 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:14:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/96765d66-53fb-41dd-99b6-cd96984e52b3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:17<149:57, 28.4 steps/min]2025-08-11 17:14:37,022 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m17:14:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:37,680 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:14:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:19<149:59, 28.4 steps/min]2025-08-11 17:14:38,337 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:14:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:39,001 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 
messages\n", - "\u001b[92m17:14:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:39,701 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m17:14:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:21<150:02, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:04, 1.63s/it] 28.4 steps/min]2025-08-11 17:14:42,801 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:14:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:24<150:06, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:14:43,842 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:14:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:25<150:08, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.33s/it] 28.4 steps/min]\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:14:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:28<150:12, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3078/7340 [108:29<150:14, 28.4 steps/min]\u001b[92m17:14:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:14:48,883 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:14:48,885 - agent.ComputerAgent - INFO - Computer: click({'x': 314, 'y': 121})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 314, 'y': 121})\n", - "\u001b[92m17:14:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:14:49,521 - 
agent.ComputerAgent - INFO - Computer: double_click({'x': 193, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 193, 'y': 178})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3080/7340 [108:32<150:07, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3080/7340 [108:33<150:08, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d351b561-0537-4e9c-84fc-8e1905f2f2c8/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3080/7340 [108:34<150:09, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:14:53,931 - agent.ComputerAgent - INFO - Computer: type({'text': 'Paper Recommendation'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Paper Recommendation'})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3080/7340 [108:35<150:11, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:14:55,591 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:14:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3081/7340 
[108:37<150:09, 28.4 steps/min]2025-08-11 17:14:56,259 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:14:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:14:57,587 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3081/7340 [108:39<150:12, 28.4 steps/min]\u001b[92m17:14:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:14:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:14:59,416 - agent.ComputerAgent - INFO - Computer: click({'x': 107, 'y': 33})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 107, 'y': 33})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:15:00,701 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 
42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3082/7340 [108:43<150:12, 28.3 steps/min]\u001b[92m17:15:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:15:02,638 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3084/7340 [108:45<150:04, 28.4 steps/min]\u001b[92m17:15:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:15:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:04,289 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:15:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:04,939 - agent.ComputerAgent - INFO - Computer: click({'x': 572, 'y': 551})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 572, 'y': 551})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3084/7340 [108:47<150:07, 28.3 steps/min]\u001b[92m17:15:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:15:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:06,301 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:15:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:15:07,327 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 32 messages\n", - "\u001b[92m17:15:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:15:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:08,631 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:15:08,632 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'win'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'win'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3085/7340 [108:51<150:07, 28.3 steps/min]2025-08-11 17:15:09,958 - agent.ComputerAgent - INFO - Computer: click({'x': 349, 'y': 207})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 349, 'y': 207})\n", - "\u001b[92m17:15:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:15:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:11,270 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 147, 'y': 581}, {'x': 147, 'y': 678}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 147, 'y': 581}, {'x': 147, 'y': 678}]})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3086/7340 [108:52<150:05, 28.3 steps/min]\u001b[92m17:15:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:11,906 - agent.ComputerAgent - INFO - Computer: click({'x': 880, 'y': 203})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 880, 'y': 203})\n", - "\u001b[92m17:15:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:12,582 - agent.ComputerAgent - INFO - Computer: click({'x': 918, 'y': 243})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 918, 'y': 243})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3090/7340 [108:55<149:48, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3090/7340 [108:56<149:50, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:15,903 - agent.ComputerAgent - INFO - Computer: click({'x': 894, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 894, 'y': 232})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3090/7340 [108:57<149:51, 28.4 steps/min]2025-08-11 17:15:16,561 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:15:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:15:17,873 - agent.ComputerAgent - INFO - Computer: type({'text': 'conda activate hf\\nconda install -y -c conda-forge datasets\\npython -c \"import datasets, sys; print(\\'datasets version:\\', datasets.__version__)\"\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 
'conda activate hf\\nconda install -y -c conda-forge datasets\\npython -c \"import datasets, sys; print(\\'datasets version:\\', datasets.__version__)\"\\n'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/96765d66-53fb-41dd-99b6-cd96984e52b3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/35bb6fb7-5b34-473c-a541-13215a694bc6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:19,817 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+z'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+z'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3091/7340 [109:01<149:52, 28.4 steps/min]2025-08-11 17:15:20,461 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:15:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:21,839 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:22,509 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3092/7340 [109:04<149:50, 28.3 steps/min]\u001b[92m17:15:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:15:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:23,191 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:15:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:23,827 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m17:15:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:15:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3093/7340 [109:05<149:47, 28.4 steps/min]2025-08-11 17:15:24,498 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 194, 'y': 182}, {'x': 183, 'y': 294}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 194, 'y': 182}, {'x': 183, 'y': 294}]})\n", - "2025-08-11 17:15:25,826 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:15:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3093/7340 [109:08<149:51, 28.3 steps/min]\u001b[92m17:15:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:27,861 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "\u001b[92m17:15:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3094/7340 [109:09<149:48, 28.3 steps/min]2025-08-11 17:15:28,558 - agent.ComputerAgent - INFO - Computer: click({'x': 205, 'y': 175})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 205, 'y': 175})\n", - "2025-08-11 17:15:29,222 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:15:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3095/7340 [109:11<149:45, 28.3 steps/min]2025-08-11 17:15:29,891 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:15:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:15:30,530 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m17:15:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:12<149:41, 28.4 steps/min]2025-08-11 17:15:31,171 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:15:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:13<149:43, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:14<149:44, 28.3 steps/min]2025-08-11 17:15:32,801 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:15:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:15:33,431 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:15:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:15<149:45, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:16<149:47, 28.3 steps/min]\u001b[92m17:15:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:35,248 - agent.ComputerAgent - INFO - Computer: click({'x': 804, 'y': 654})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 804, 'y': 654})\n", - "2025-08-11 17:15:35,931 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:15:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3096/7340 [109:17<149:49, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"2025-08-11 17:15:37,102 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:15:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3097/7340 [109:18<149:45, 28.3 steps/min]2025-08-11 17:15:37,759 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:15:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3097/7340 [109:22<149:51, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:15:42,613 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+p'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+p'})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3097/7340 [109:24<149:53, 28.3 steps/min]2025-08-11 17:15:43,243 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:15:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:15:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:44,564 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:15:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3097/7340 [109:26<149:56, 28.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:45,740 - agent.ComputerAgent - INFO - Computer: click({'x': 408, 'y': 279})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 408, 'y': 279})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3097/7340 [109:27<149:57, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3098/7340 [109:28<149:54, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:48,097 - agent.ComputerAgent - INFO - Computer: click({'x': 880, 'y': 203})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 880, 'y': 203})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:50,080 - agent.ComputerAgent - INFO - Computer: type({'text': 'conda install -y -c conda-forge --override-channels datasets\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'conda install -y -c conda-forge --override-channels datasets\\n'})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3098/7340 [109:31<149:58, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:51,375 - agent.ComputerAgent - INFO - Computer: type({'text': 'python --version\\npython3 --version\\nls /usr/bin/python* | head -n 20\\napt-cache policy python4 || apt-cache search python4 | head\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'python --version\\npython3 --version\\nls /usr/bin/python* | head -n 20\\napt-cache policy python4 || 
apt-cache search python4 | head\\n'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3100/7340 [109:34<149:52, 28.3 steps/min]2025-08-11 17:15:53,291 - agent.ComputerAgent - INFO - Computer: move({'x': 914, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: move({'x': 914, 'y': 232})\n", - "\u001b[92m17:15:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:53,963 - agent.ComputerAgent - INFO - Computer: click({'x': 935, 'y': 351})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'x': 935, 'y': 351})\n", - "\u001b[92m17:15:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:15:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3101/7340 [109:36<149:49, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:55,270 - agent.ComputerAgent - INFO - Computer: click({'x': 225, 'y': 520})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 225, 'y': 520})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3103/7340 [109:37<149:41, 28.3 steps/min]\u001b[92m17:15:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:15:56,620 - agent.ComputerAgent - INFO - Computer: click({'x': 235, 'y': 206})\n", - "INFO:agent.ComputerAgent:Computer: 
click({'x': 235, 'y': 206})\n", - "\u001b[92m17:15:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:15:57,299 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 659, 'scroll_x': 0, 'x': 840, 'y': 467})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 659, 'scroll_x': 0, 'x': 840, 'y': 467})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3104/7340 [109:39<149:38, 28.3 steps/min]2025-08-11 17:15:57,924 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:15:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:15:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3106/7340 [109:40<149:30, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:15:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:15:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:00,272 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 193, 'y': 180}, {'x': 184, 'y': 293}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 193, 'y': 180}, {'x': 184, 'y': 293}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3106/7340 [109:42<149:33, 28.3 steps/min]\u001b[92m17:16:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:16:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:02,072 - agent.ComputerAgent - INFO - Computer: click({'x': 1011, 'y': 62})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1011, 'y': 62})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - " 
42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3107/7340 [109:43<149:29, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:02,765 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m17:16:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/96765d66-53fb-41dd-99b6-cd96984e52b3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:03,441 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:16:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:04,480 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:16:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3108/7340 [109:46<149:28, 28.3 steps/min]2025-08-11 17:16:05,143 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:16:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:05,823 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m17:16:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3108/7340 [109:47<149:29, 28.3 steps/min]2025-08-11 17:16:06,914 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:16:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/dc026dd3-8d59-43e0-a475-ecef72f1db12/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3108/7340 [109:48<149:31, 28.3 steps/min]2025-08-11 17:16:07,563 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:16:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:08,252 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:16:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3108/7340 [109:50<149:34, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end 
generation.\n", - "2025-08-11 17:16:10,625 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m17:16:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:16:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3108/7340 [109:52<149:36, 28.3 steps/min]2025-08-11 17:16:11,298 - agent.ComputerAgent - INFO - Computer: click({'x': 422, 'y': 249})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 422, 'y': 249})\n", - "\u001b[92m17:16:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:11,982 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:16:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:12,642 - agent.ComputerAgent - INFO - Computer: click({'x': 381, 'y': 91})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 381, 'y': 91})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3110/7340 
[109:55<149:30, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:15,007 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3111/7340 [109:57<149:28, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3111/7340 [109:59<149:30, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:16:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:16:18,043 - agent.ComputerAgent - INFO - Computer: click({'x': 413, 'y': 587})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 413, 'y': 587})\n", - "\u001b[92m17:16:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:18,709 - agent.ComputerAgent - INFO - Computer: click({'x': 125, 'y': 182})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 125, 'y': 182})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3111/7340 [110:00<149:32, 28.3 steps/min]2025-08-11 17:16:19,335 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:16:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:20,773 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:16:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3113/7340 [110:03<149:26, 28.3 steps/min]\u001b[92m17:16:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:16:22,077 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:16:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:16:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:23,083 - agent.ComputerAgent - INFO - Computer: click({'x': 839, 'y': 234})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 839, 'y': 234})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:16:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/730002fc-5760-41b0-97b8-f6783353a242/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3113/7340 [110:06<149:30, 28.3 
steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:16:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055e9f8b-8c01-4732-8b5f-ef4fc732f122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:16:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:16:25,728 - agent.ComputerAgent - INFO - Computer: click({'x': 925, 'y': 244})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 925, 'y': 244})\n", - "\u001b[92m17:16:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3114/7340 [110:07<149:26, 28.3 steps/min]2025-08-11 17:16:26,375 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m17:16:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:27,003 - agent.ComputerAgent - INFO - Computer: click({'x': 847, 'y': 404})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 847, 'y': 404})\n", - "\u001b[92m17:16:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3115/7340 [110:08<149:23, 28.3 steps/min]2025-08-11 17:16:28,017 - agent.ComputerAgent - INFO - Computer: click({'x': 880, 'y': 203})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 880, 'y': 203})\n", - "2025-08-11 17:16:28,682 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m17:16:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3116/7340 [110:10<149:21, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:30,033 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3117/7340 [110:12<149:18, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/730002fc-5760-41b0-97b8-f6783353a242/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:31,839 - agent.ComputerAgent - INFO - Computer: click({'x': 131, 'y': 181})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 131, 'y': 181})\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3118/7340 [110:13<149:15, 28.3 steps/min]2025-08-11 17:16:32,503 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m17:16:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3119/7340 [110:14<149:11, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3119/7340 [110:15<149:12, 28.3 steps/min]2025-08-11 17:16:34,177 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:16:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:36,680 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3119/7340 [110:18<149:16, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:16:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:16:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/35bb6fb7-5b34-473c-a541-13215a694bc6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:16:37,927 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:16:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:38,544 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:16:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:16:39,208 - agent.ComputerAgent - INFO - Computer: click({'x': 940, 'y': 202})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 940, 'y': 202})\n", - "\u001b[92m17:16:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 42%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------------------| 3119/7340 [110:21<149:21, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "2025-08-11 17:16:40,538 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 600, 'x': 422, 'y': 249})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 600, 'x': 422, 'y': 249})\n", - "2025-08-11 17:16:41,185 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m17:16:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3121/7340 [110:23<149:14, 28.3 steps/min]\u001b[92m17:16:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:42,861 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:16:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:44,179 - agent.ComputerAgent - INFO - Computer: click({'x': 115, 'y': 184})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 115, 'y': 184})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:16:44,861 - agent.ComputerAgent - INFO - Computer: click({'x': 185, 'y': 177})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 185, 'y': 177})\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3121/7340 [110:26<149:17, 28.3 steps/min]\u001b[92m17:16:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:16:45,524 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:16:45,525 - agent.ComputerAgent - INFO - Computer: click({'x': 345, 'y': 202})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 345, 'y': 202})\n", - "2025-08-11 17:16:46,155 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:16:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3123/7340 [110:27<149:09, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider 
= huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:16:48,493 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "\u001b[92m17:16:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6bacb467-6eb5-4ead-ac71-a185d2fa5e80/close \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3124/7340 [110:30<149:07, 28.3 steps/min]2025-08-11 17:16:49,152 - agent.ComputerAgent - INFO - Computer: click({'x': 964, 'y': 734})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 964, 'y': 734})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3125/7340 [110:31<149:04, 28.3 steps/min]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 17:16:50,459 - agent.ComputerAgent - INFO - LLM processing started with 13 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 13 messages\n", - "\u001b[92m17:16:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3126/7340 [110:32<149:01, 28.3 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055e9f8b-8c01-4732-8b5f-ef4fc732f122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:16:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00/dev/null || echo 'no python4 found'\\n\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"which python4 || command -v python4 || ls /usr/bin/python4* 2>/dev/null || echo 'no python4 found'\\n\"})\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3141/7340 [111:10<148:37, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ae2379a3-a039-4954-afc2-582f8ebffdd2/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3143/7340 [111:11<148:28, 28.3 steps/min]2025-08-11 17:17:30,119 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:17:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:17:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:17:32,137 - agent.ComputerAgent - INFO - Computer: type({'text': '=DATEDIF(DATEVALUE(REGEX(A2;\".* \";\"\"));TODAY();\"y\")'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '=DATEDIF(DATEVALUE(REGEX(A2;\".* \";\"\"));TODAY();\"y\")'})\n", - "2025-08-11 17:17:32,777 - agent.ComputerAgent - INFO - LLM processing started with 23 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 23 messages\n", - "\u001b[92m17:17:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:17:34,149 - agent.ComputerAgent - INFO - Computer: type({'text': 'LLM Powered Autonomous Agents.pdf'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'LLM Powered Autonomous Agents.pdf'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:17:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3143/7340 [111:17<148:36, 28.2 steps/min]\u001b[92m17:17:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:17:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:17:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/96765d66-53fb-41dd-99b6-cd96984e52b3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:17:36,722 - agent.ComputerAgent - INFO - Computer: click({'x': 488, 'y': 368})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 488, 'y': 368})\n", - "\u001b[92m17:17:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", 
- "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:17:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3145/7340 [111:18<148:28, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:17:37,353 - agent.ComputerAgent - INFO - Computer: click({'x': 349, 'y': 201})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 349, 'y': 201})\n", - "2025-08-11 17:17:37,994 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 660, 'scroll_x': 0, 'x': 706, 'y': 659})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 660, 'scroll_x': 0, 'x': 706, 'y': 659})\n", - "\u001b[92m17:17:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3147/7340 [111:19<148:19, 28.3 steps/min]\u001b[92m17:17:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:17:38,649 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m17:17:38 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:17:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:17:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:17:40,658 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "2025-08-11 17:17:41,329 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 194, 'y': 183}, {'x': 184, 'y': 291}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 194, 'y': 183}, {'x': 184, 'y': 291}]})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3149/7340 [111:23<148:14, 28.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:17:41,970 - agent.ComputerAgent - INFO - LLM processing started with 25 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 25 messages\n", - "\u001b[92m17:17:41 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:17:42,600 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:17:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:17:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3150/7340 [111:24<148:11, 28.3 steps/min]2025-08-11 17:17:43,277 - agent.ComputerAgent - INFO - Computer: click({'x': 850, 'y': 202})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 850, 'y': 202})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055e9f8b-8c01-4732-8b5f-ef4fc732f122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/730002fc-5760-41b0-97b8-f6783353a242/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:17:43,930 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m17:17:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3150/7340 [111:25<148:13, 28.3 steps/min]2025-08-11 17:17:44,570 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:17:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:17:45,261 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m17:17:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:17:45,939 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:17:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/091ec079-295e-4528-bad5-f34604d013c2/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3151/7340 [111:27<148:10, 28.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3152/7340 [111:31<148:11, 
28.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa0172ad-f4a9-4f1a-9e06-2d510775dbd0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/085317e9-3b47-437e-8528-0a0fc0e6e688/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:17:51,370 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:17:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3152/7340 [111:33<148:13, 28.3 steps/min]\u001b[92m17:17:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:17:52,759 - agent.ComputerAgent - INFO - LLM processing started with 27 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 27 messages\n", - "\u001b[92m17:17:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3152/7340 [111:34<148:14, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:17:53,422 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m17:17:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:17:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3152/7340 [111:36<148:18, 28.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:17:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3153/7340 [111:37<148:14, 28.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0190121-650c-4779-b26d-2480f313dc84/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:05, 1.77s/it]2025-08-11 17:17:57,266 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:17:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3153/7340 [111:39<148:15, 28.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:17:57,933 - agent.ComputerAgent - INFO - LLM processing started with 29 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 29 messages\n", - "\u001b[92m17:17:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.39s/it] 28.2 steps/min]\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:18:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3154/7340 [111:43<148:16, 28.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:18:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:18:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:18:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3167/7340 [111:44<147:14, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:18:03,401 - agent.ComputerAgent - INFO - LLM processing started with 31 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 31 messages\n", - "\u001b[92m17:18:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:18:04,060 - agent.ComputerAgent - INFO - Computer: click({'x': 666, 'y': 219})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 666, 'y': 219})\n", - "\u001b[92m17:18:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:18:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:18:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/094ee49d-29b5-4911-bfc8-7d0e73a55c44/close \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3167/7340 [111:45<147:15, 28.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:18:04,733 - agent.ComputerAgent - INFO - Computer: click({'x': 442, 'y': 162})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 442, 'y': 162})\n", - "2025-08-11 17:18:05,375 - agent.ComputerAgent - INFO - Computer: click({'x': 811, 'y': 336})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 811, 'y': 336})\n", - "2025-08-11 17:18:06,055 - agent.ComputerAgent - INFO - Computer: double_click({'x': 347, 'y': 222})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 347, 'y': 222})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "\u001b[92m17:18:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:18:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3169/7340 [111:48<147:09, 28.3 steps/min]2025-08-11 17:18:07,401 - agent.ComputerAgent - INFO - Computer: click({'x': 536, 'y': 276})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 536, 'y': 276})\n", - "2025-08-11 17:18:08,031 - agent.ComputerAgent - INFO - Computer: double_click({'x': 489, 'y': 368})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 489, 'y': 368})\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3172/7340 [111:49<146:56, 28.4 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3174/7340 [111:50<146:48, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:18:09,694 - agent.ComputerAgent - INFO - LLM processing started with 33 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 33 messages\n", - "\u001b[92m17:18:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3174/7340 [111:52<146:50, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3175/7340 [111:53<146:47, 28.4 steps/min]INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:18:13,931 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:18:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9f1aa688-383a-44cc-9959-87e1e32dd55c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6a6179f5-13f9-4283-a0d1-aaafd881b00a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/730002fc-5760-41b0-97b8-f6783353a242/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/55d96e84-fdd8-4854-8eca-195f1a84dbc3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ae2379a3-a039-4954-afc2-582f8ebffdd2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afb4e623-39bf-4f23-ac18-6c4a71f53c62/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:18:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1909e6f5-b395-4e1d-b1f7-b06406f8731b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/025be48d-d757-4973-8c17-e42b8f6814b0/invoke \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3175/7340 [111:56<146:51, 28.4 steps/min]2025-08-11 17:18:15,929 - agent.ComputerAgent - INFO - LLM processing started with 35 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 35 messages\n", - "\u001b[92m17:18:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:18:16,592 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:18:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/4b18a76d-ef46-4622-9643-9ee6fe4900a3/reset \"HTTP/1.1 200 OK\"\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3175/7340 [111:58<146:53, 28.4 steps/min]2025-08-11 17:18:17,271 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:18:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:18:17,933 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:18:17 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-----------------------| 3175/7340 [111:59<146:55, 28.3 steps/min]2025-08-11 17:18:18,584 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m17:18:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 arr[j + 1]:\\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\\n swapped = True\\n if not swapped:\\n break'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '\\n n = len(arr)\\n for i in range(n):\\n swapped = False\\n for j in range(0, n - i - 1):\\n if arr[j] > arr[j + 1]:\\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\\n swapped = True\\n if not swapped:\\n break'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4b4d291-1fca-4038-8670-448014a55182/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3439/7340 [120:46<137:00, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3bde0e0-c60f-4177-b7dd-15e361558126/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 
3439/7340 [120:47<137:01, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:07,882 - agent.ComputerAgent - INFO - Computer: type({'text': 'libreoffice --headless --convert-to pdf --outdir . -- *.doc\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'libreoffice --headless --convert-to pdf --outdir . -- *.doc\\n'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:27:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/dc026dd3-8d59-43e0-a475-ecef72f1db12/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3439/7340 [120:50<137:04, 28.5 steps/min]2025-08-11 17:27:09,249 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the 
current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:27:09,250 - agent.ComputerAgent - INFO - Computer: double_click({'x': 984, 'y': 491})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 984, 'y': 491})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:09,926 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m17:27:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/94463065-a78e-479a-b964-45ad23a48cbb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3440/7340 [120:52<137:02, 28.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:11,254 - agent.ComputerAgent - INFO - Computer: click({'x': 153, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 153, 'y': 53})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end 
generation.\n", - "2025-08-11 17:27:12,616 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+s'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+s'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:13,257 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:27:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:14,573 - agent.ComputerAgent - INFO - Computer: type({'text': 'do not track'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'do not track'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:15,903 - agent.ComputerAgent - INFO - Computer: type({'text': '30'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '30'})\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3441/7340 [120:57<137:03, 28.4 steps/min]2025-08-11 17:27:16,549 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m17:27:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 
17:27:17,244 - agent.ComputerAgent - INFO - Computer: click({'x': 268, 'y': 329})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 268, 'y': 329})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3444/7340 [120:59<136:52, 28.5 steps/min]\u001b[92m17:27:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:18,557 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m17:27:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:19,249 - agent.ComputerAgent - INFO - Computer: click({'x': 955, 'y': 130})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 955, 'y': 130})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/127b9298-d3cc-4b90-8567-e45146efa729/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3445/7340 [121:00<136:49, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3446/7340 [121:02<136:46, 28.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:27:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:21,579 - agent.ComputerAgent - INFO - Computer: click({'x': 188, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 188, 'y': 105})\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3446/7340 [121:03<136:47, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f8984906-7392-4305-88fa-ae9a4808fa8d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/050a0934-63e8-46a0-8868-de32b28174ef/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3447/7340 [121:04<136:44, 28.5 steps/min]2025-08-11 17:27:23,268 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:27:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/57944bbf-74a1-4e6d-9401-f7b0144460f7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/b28701c2-0fa4-4b07-bace-735fd2133893/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:23,935 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:27:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0322563b-daf3-41ae-8a08-f5ecd9282bcc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/792a6953-2092-47e4-a8a8-57a4af4e3be1/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3447/7340 [121:05<136:45, 28.5 steps/min]2025-08-11 17:27:24,567 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:27:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:27:25,196 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:27:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3447/7340 [121:07<136:47, 28.5 steps/min]2025-08-11 17:27:25,887 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 
messages\n", - "\u001b[92m17:27:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1909e6f5-b395-4e1d-b1f7-b06406f8731b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3447/7340 [121:08<136:48, 28.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:27,295 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:27:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fde8bca8-8a90-4fed-b46f-c24829445665/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:27,985 - agent.ComputerAgent - INFO - Computer: click({'x': 262, 'y': 479})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 262, 'y': 479})\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3447/7340 
[121:09<136:50, 28.4 steps/min]2025-08-11 17:27:28,643 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m17:27:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3448/7340 [121:11<136:47, 28.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:30,354 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:27:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3448/7340 [121:12<136:48, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:31,027 - agent.ComputerAgent - INFO - Computer: click({'x': 219, 'y': 134})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 219, 'y': 134})\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3449/7340 
[121:14<136:46, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:33,880 - agent.ComputerAgent - INFO - Agent: Added the Dissolve slide transition to the first slide and saved the presentation.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: Added the Dissolve slide transition to the first slide and saved the presentation.\n", - "\n", - "Task completed.\n", - "2025-08-11 17:27:34,565 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 475\n", - " - prompt_tokens: 6691\n", - " - total_tokens: 7166\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 4608\n", - " - response_cost: $0.0079\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 475\n", - " - prompt_tokens: 6691\n", - " - total_tokens: 7166\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 4608\n", - " - response_cost: $0.0079\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3450/7340 [121:17<136:45, 28.4 steps/min]\u001b[92m17:27:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:35,955 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:27:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:36,603 - agent.ComputerAgent - INFO - Computer: move({'x': 166, 'y': 68})\n", - "INFO:agent.ComputerAgent:Computer: move({'x': 166, 'y': 68})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4b18a76d-ef46-4622-9643-9ee6fe4900a3/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3450/7340 [121:18<136:46, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3b14802-9f99-46f5-8fa9-9661af7a973d/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:37,265 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:27:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:27:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3451/7340 [121:19<136:43, 28.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:27:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:39,152 - agent.ComputerAgent - INFO - Computer: click({'x': 87, 'y': 158})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 87, 'y': 158})\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3451/7340 [121:20<136:44, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:41,144 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3452/7340 [121:23<136:43, 28.4 steps/min]\u001b[92m17:27:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:27:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:42,465 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:27:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:43,111 - agent.ComputerAgent - INFO - Computer: double_click({'x': 984, 'y': 145})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 984, 'y': 145})\n", - "\u001b[92m17:27:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/57944bbf-74a1-4e6d-9401-f7b0144460f7/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3452/7340 [121:24<136:45, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:27:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:44,178 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started 
with 26 messages\n", - "\u001b[92m17:27:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:45,504 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3b14802-9f99-46f5-8fa9-9661af7a973d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3453/7340 [121:27<136:43, 28.4 steps/min]2025-08-11 17:27:46,827 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 86, 'y': 123}, {'x': 83, 'y': 250}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 86, 'y': 123}, {'x': 83, 'y': 250}]})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:27:47,486 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:27:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3466/7340 [121:29<135:47, 28.5 steps/min]\u001b[92m17:27:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:48,140 - agent.ComputerAgent - INFO - Computer: click({'x': 225, 'y': 564})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 225, 'y': 564})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:27:49,491 - agent.ComputerAgent - INFO - Computer: type({'text': 'ls -1 *.doc\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'ls -1 *.doc\\n'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/a4b4d291-1fca-4038-8670-448014a55182/reset \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3467/7340 [121:31<135:45, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3b14802-9f99-46f5-8fa9-9661af7a973d/close \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3469/7340 [121:32<135:38, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/dc026dd3-8d59-43e0-a475-ecef72f1db12/invoke \"HTTP/1.1 200 OK\"\n", - " 47%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------------| 3469/7340 [121:33<135:39, 28.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:27:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:27:53,445 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:27:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4b4d291-1fca-4038-8670-448014a55182/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:28:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:29:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:01,218 - agent.ComputerAgent - INFO - Computer: click({'x': 521, 'y': 422})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 521, 'y': 422})\n", - "2025-08-11 17:29:01,891 - agent.ComputerAgent - INFO - Computer: click({'x': 17, 'y': 335})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 17, 'y': 335})\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:02,530 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m17:29:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3490/7340 [122:44<135:23, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:03,204 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:29:03,205 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 960, 'y': 713})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 960, 'y': 713})\n", - "2025-08-11 17:29:04,634 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -646, 'scroll_x': 0, 'x': 890, 'y': 760})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -646, 'scroll_x': 0, 'x': 890, 'y': 760})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3492/7340 [122:47<135:18, 28.4 steps/min]\u001b[92m17:29:05 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:05,965 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m17:29:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:06,998 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 17:29:06,998 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 432})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 432})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:08,322 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3496/7340 [122:54<135:08, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/57944bbf-74a1-4e6d-9401-f7b0144460f7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3bde46c9-685b-4102-9ef4-a1535d5fcc85/invoke \"HTTP/1.1 
200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/479a3737-3ad4-48da-b73f-c8ea6e38d096/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:13,557 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m17:29:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2a28af1e-e61d-489c-a18e-23c5071c9aff/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4b4d291-1fca-4038-8670-448014a55182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1909e6f5-b395-4e1d-b1f7-b06406f8731b/invoke \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3496/7340 [122:55<135:09, 28.4 steps/min]2025-08-11 17:29:14,241 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m17:29:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0322563b-daf3-41ae-8a08-f5ecd9282bcc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:14,878 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - 
"\u001b[92m17:29:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:15,558 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:29:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3496/7340 [122:57<135:12, 28.4 steps/min]\u001b[92m17:29:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/792a6953-2092-47e4-a8a8-57a4af4e3be1/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:16,889 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m17:29:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:17,556 - agent.ComputerAgent - 
INFO - Computer: click({'x': 599, 'y': 760})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 599, 'y': 760})\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3496/7340 [122:59<135:13, 28.4 steps/min]2025-08-11 17:29:18,237 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:29:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:18,917 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m17:29:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3497/7340 [123:00<135:10, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:19,548 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m17:29:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a5f69ad6-9361-4670-b101-61761113341c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3497/7340 [123:01<135:12, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3497/7340 [123:03<135:14, 28.4 steps/min]\u001b[92m17:29:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:23,251 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fde8bca8-8a90-4fed-b46f-c24829445665/invoke \"HTTP/1.1 200 OK\"\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3497/7340 [123:06<135:17, 28.4 steps/min]\u001b[92m17:29:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:25,847 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 612, 'x': 655, 'y': 419})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 612, 'x': 655, 'y': 419})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:29:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3499/7340 [123:08<135:11, 28.4 steps/min]\u001b[92m17:29:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:27,867 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m17:29:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:28,508 - agent.ComputerAgent - INFO - Computer: click({'x': 256, 'y': 128})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 256, 'y': 128})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"\u001b[92m17:29:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:31,110 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "\u001b[92m17:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:31,743 - agent.ComputerAgent - INFO - Computer: click({'x': 182, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 182, 'y': 105})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:29:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3499/7340 [123:14<135:16, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:33,138 - agent.ComputerAgent - INFO - Computer: click({'x': 634, 'y': 529})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 634, 'y': 529})\n", - "2025-08-11 17:29:33,791 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 82, 'y': 124}, {'x': 75, 'y': 124}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 82, 'y': 124}, {'x': 75, 'y': 124}]})\n", - "2025-08-11 17:29:34,450 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -517, 'scroll_x': 0, 'x': 46, 'y': 762})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -517, 'scroll_x': 0, 'x': 46, 'y': 762})\n", - "\u001b[92m17:29:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:35,110 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 
34 messages\n", - "\u001b[92m17:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:35,790 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 673, 'scroll_x': 0, 'x': 86, 'y': 245})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 673, 'scroll_x': 0, 'x': 86, 'y': 245})\n", - "\u001b[92m17:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3501/7340 [123:17<135:11, 28.4 steps/min]2025-08-11 17:29:36,477 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 333})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 333})\n", - "\u001b[92m17:29:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:37,144 - agent.ComputerAgent - INFO - Computer: click({'x': 268, 'y': 329})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 268, 'y': 329})\n", - "2025-08-11 17:29:37,807 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 332, 'y': 308}, {'x': 345, 'y': 308}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 332, 'y': 308}, {'x': 345, 'y': 
308}]})\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3508/7340 [123:20<134:44, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f8984906-7392-4305-88fa-ae9a4808fa8d/invoke \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 17:29:39,468 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m17:29:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:40,852 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+f'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:42,174 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3508/7340 [123:23<134:47, 28.4 steps/min]2025-08-11 17:29:43,192 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m17:29:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/a4b4d291-1fca-4038-8670-448014a55182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/479a3737-3ad4-48da-b73f-c8ea6e38d096/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0322563b-daf3-41ae-8a08-f5ecd9282bcc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/dc026dd3-8d59-43e0-a475-ecef72f1db12/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/050a0934-63e8-46a0-8868-de32b28174ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/4b18a76d-ef46-4622-9643-9ee6fe4900a3/invoke \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3509/7340 [123:25<134:44, 28.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/94463065-a78e-479a-b964-45ad23a48cbb/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:29:43,878 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m17:29:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:44,537 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m17:29:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:45,557 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:29:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b28701c2-0fa4-4b07-bace-735fd2133893/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2a28af1e-e61d-489c-a18e-23c5071c9aff/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3510/7340 [123:28<134:43, 28.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:47,830 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m17:29:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:48,461 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m17:29:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3510/7340 [123:30<134:45, 28.4 steps/min]2025-08-11 17:29:49,532 - agent.ComputerAgent - INFO - Computer: click({'x': 728, 'y': 179})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 728, 'y': 179})\n", - "2025-08-11 17:29:50,219 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:29:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3510/7340 [123:32<134:48, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:52,661 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m17:29:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:29:53,352 - agent.ComputerAgent - INFO - Computer: double_click({'x': 181, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 181, 'y': 105})\n", - "\u001b[92m17:29:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:29:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f8984906-7392-4305-88fa-ae9a4808fa8d/invoke \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3511/7340 [123:35<134:47, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:54,670 - agent.ComputerAgent - INFO - 
Computer: click({'x': 399, 'y': 541})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 399, 'y': 541})\n", - "2025-08-11 17:29:55,330 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m17:29:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3513/7340 [123:37<134:40, 28.4 steps/min]\u001b[92m17:29:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:57,015 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:29:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:29:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3bde46c9-685b-4102-9ef4-a1535d5fcc85/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3514/7340 [123:38<134:37, 28.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:29:57,710 - agent.ComputerAgent - INFO - Computer: click({'x': 525, 'y': 400})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 525, 'y': 400})\n", - "\u001b[92m17:29:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m17:29:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:29:59,392 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m17:29:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:30:00,039 - agent.ComputerAgent - INFO - Computer: click({'x': 1009, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1009, 'y': 101})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/792a6953-2092-47e4-a8a8-57a4af4e3be1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3515/7340 [123:41<134:36, 28.4 steps/min]Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 17:30:00,720 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m17:30:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:30:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:30:01,758 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -657, 'scroll_x': 0, 'x': 988, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -657, 'scroll_x': 0, 'x': 988, 'y': 427})\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3517/7340 [123:43<134:29, 28.4 steps/min]2025-08-11 17:30:02,459 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m17:30:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m17:30:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b6bbc5bc-5598-4043-be1e-6ebf2da5f046/close \"HTTP/1.1 200 OK\"\n", - " 
48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3518/7340 [123:44<134:26, 28.4 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m17:30:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:30:04,432 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 638, 'scroll_x': 0, 'x': 90, 'y': 244})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 638, 'scroll_x': 0, 'x': 90, 'y': 244})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f8984906-7392-4305-88fa-ae9a4808fa8d/invoke \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3518/7340 [123:46<134:27, 28.4 steps/min]2025-08-11 17:30:05,089 - agent.ComputerAgent - INFO - LLM processing started with 43 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 43 messages\n", - "\u001b[92m17:30:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3519/7340 [123:47<134:24, 28.4 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/57944bbf-74a1-4e6d-9401-f7b0144460f7/invoke \"HTTP/1.1 200 OK\"\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3519/7340 [123:48<134:25, 28.4 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/a4b4d291-1fca-4038-8670-448014a55182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fde8bca8-8a90-4fed-b46f-c24829445665/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/479a3737-3ad4-48da-b73f-c8ea6e38d096/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 17:30:07,300 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m17:30:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m17:30:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 17:30:08,649 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m17:30:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------------| 3520/7340 [123:50<134:23, 28.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 17:30:09,313 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m17:30:09 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00&1 | sed -n '1,120p'\\n\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"ffmpeg -hide_banner -i video.mp4 2>&1 | sed -n '1,120p'\\n\"})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/83c40b56-f0bf-4b3a-97a5-8a1ae567e0a1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8d107e49-ae48-4b20-a0a1-7facc71e66f7/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4522/7340 [164:12<102:20, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:31,841 - agent.ComputerAgent - INFO - Computer: click({'x': 802, 'y': 437})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 802, 'y': 437})\n", - "2025-08-11 18:10:32,505 - agent.ComputerAgent - INFO - Computer: click({'x': 745, 'y': 540})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 745, 'y': 540})\n", - "2025-08-11 18:10:33,146 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m18:10:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f096381e-eb5b-49dc-8943-c821405cce10/invoke \"HTTP/1.1 200 OK\"\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/afdc88be-f209-412c-8905-25f3e8cbf43a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/53e1a378-de8f-4a22-9dc0-27eef85d8356/invoke \"HTTP/1.1 200 OK\"\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4523/7340 [164:14<102:17, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:33,803 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m18:10:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:34,476 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m18:10:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:10:35,141 - agent.ComputerAgent - INFO - Computer: click({'x': 205, 'y': 152})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 205, 'y': 152})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/91803c09-cf12-4c24-92ec-24bcf68c0897/invoke \"HTTP/1.1 200 OK\"\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4525/7340 [164:17<102:12, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:36,437 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m18:10:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:37,794 - agent.ComputerAgent - INFO - Computer: click({'x': 1014, 'y': 31})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1014, 'y': 31})\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4526/7340 [164:19<102:10, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:38,466 - agent.ComputerAgent - INFO - LLM processing started 
with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m18:10:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:39,125 - agent.ComputerAgent - INFO - Computer: click({'x': 399, 'y': 354})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 399, 'y': 354})\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4527/7340 [164:20<102:07, 27.5 steps/min]2025-08-11 18:10:39,795 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:10:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4528/7340 [164:22<102:04, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m18:10:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:42,145 - agent.ComputerAgent - INFO - Computer: click({'x': 437, 'y': 99})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 437, 'y': 99})\n", - "\u001b[92m18:10:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f55f73a3-1816-4f61-8ec1-88f743cec333/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b01cd4a6-3203-476b-8ece-c651b889f821/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4528/7340 [164:24<102:06, 27.5 steps/min]\u001b[92m18:10:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:43,477 - agent.ComputerAgent - INFO - Computer: click({'x': 46, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 46, 'y': 52})\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/941d9ec3-7c28-40f6-b948-70db95115571/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/91803c09-cf12-4c24-92ec-24bcf68c0897/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c83605a3-e62d-48d7-8568-f181d5627773/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fcdab7d3-0448-49dd-b2db-f79a7c74a08b/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:44,127 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m18:10:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:10:44,765 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m18:10:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:10:45,424 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m18:10:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", 
- "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4545/7340 [164:27<101:07, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f0aa6a3e-e61f-49b1-ade9-e8150e333596/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb48f65f-d00e-465a-a0ea-394e844382ca/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:10:46,114 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m18:10:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:10:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:10:46,753 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 51, 'y': 730}, {'x': 991, 'y': 759}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 51, 'y': 730}, {'x': 991, 'y': 759}]})\n", - "2025-08-11 18:10:47,399 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m18:10:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4546/7340 [164:29<101:05, 27.6 steps/min]2025-08-11 18:10:48,065 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:10:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/91803c09-cf12-4c24-92ec-24bcf68c0897/close \"HTTP/1.1 200 OK\"\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4547/7340 [164:30<101:02, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:10:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m18:10:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 
18:10:50,780 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:10:50,781 - agent.ComputerAgent - INFO - Computer: click({'x': 18, 'y': 237})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 18, 'y': 237})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:10:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------------| 4547/7340 [164:33<101:04, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", - " 64%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---------------| 4702/7340 [170:06<95:26, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f55f73a3-1816-4f61-8ec1-88f743cec333/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2bf9cd89-2d6a-4856-a09d-a771bc278600/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Settings > Privacy and security > Security > Safe Browsing.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: Chrome’s Safe Browsing is now set to Enhanced protection. 
You’ll get real-time warnings about potentially harmful or deceptive websites, dangerous downloads, and malicious extensions.\n", - "\n", - "If you ever want to change this: Chrome menu > Settings > Privacy and security > Security > Safe Browsing.\n", - "\n", - "Task completed\n", - "2025-08-11 18:22:14,920 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 514\n", - " - prompt_tokens: 6726\n", - " - total_tokens: 7240\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0135\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 514\n", - " - prompt_tokens: 6726\n", - " - total_tokens: 7240\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0135\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:22:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26d5566b-d949-4b71-accb-45197078f693/invoke \"HTTP/1.1 200 OK\"\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4823/7340 [175:57<91:49, 27.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:22:16,260 - agent.ComputerAgent - INFO - LLM processing started with 21 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 21 messages\n", 
- "\u001b[92m18:22:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4823/7340 [175:58<91:50, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cfe4e097-0434-4025-a00a-78e26d753e51/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d9bc2461-8bd1-4c45-bebd-f473293c581c/invoke \"HTTP/1.1 200 OK\"\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4824/7340 [175:59<91:47, 27.4 steps/min]2025-08-11 18:22:17,920 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m18:22:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26d5566b-d949-4b71-accb-45197078f693/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:22:19,103 - agent.ComputerAgent - INFO - LLM processing started with 23 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 23 messages\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4824/7340 [176:00<91:48, 27.4 steps/min]\u001b[92m18:22:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:22:19 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:22:19,764 - agent.ComputerAgent - INFO - Computer: click({'x': 120, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 120, 'y': 53})\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4824/7340 [176:01<91:48, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d9bc2461-8bd1-4c45-bebd-f473293c581c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4838/7340 [176:02<91:02, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d9bc2461-8bd1-4c45-bebd-f473293c581c/close \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:22:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26d5566b-d949-4b71-accb-45197078f693/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a4a2a38e-bec8-46b5-b9c9-3e82144e6ff7/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:22:22,630 - agent.ComputerAgent - INFO - LLM processing started with 25 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 25 
messages\n", - "\u001b[92m18:22:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:22:23,272 - agent.ComputerAgent - INFO - Computer: click({'x': 833, 'y': 385})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 833, 'y': 385})\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4838/7340 [176:05<91:03, 27.5 steps/min]2025-08-11 18:22:23,933 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m18:22:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:22:25,259 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+alt+t'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+alt+t'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4840/7340 [176:07<90:58, 27.5 steps/min]\u001b[92m18:22:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:22:25,941 - agent.ComputerAgent - INFO - Computer: click({'x': 989, 'y': 73})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'x': 989, 'y': 73})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/06215fad-881b-4e96-84a9-854f2d453fc5/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:22:26,601 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m18:22:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4840/7340 [176:08<90:58, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:22:27,300 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:22:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:22:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:22:27,963 - agent.ComputerAgent - INFO - Computer: click({'x': 901, 'y': 312})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 901, 'y': 312})\n", - " 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--------------| 4841/7340 [176:09<90:56, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:22:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider 
= huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26d5566b-d949-4b71-accb-45197078f693/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ad87d89c-437d-4ed4-b0f0-a157e7d11bbd/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:22:29,233 - agent.ComputerAgent - INFO - LLM processing started with 27 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 27 messages\n", - "\u001b[92m18:22:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Lock).\n", - "2) Select the text box from the Layers/Selection panel (don’t click on the canvas).\n", - "3) Snap it to the left edge:\n", - "- Either set the X position to 0.\n", - "- Or use the Align Left to canvas/page/artboard command.\n", - "4) Unlock/show the image layer again if needed.\n", - "\n", - "Exact steps by app\n", - "- Figma:\n", - " - Lock image: Ctrl+Shift+L (Cmd+Shift+L on Mac) or click the lock icon in Layers.\n", - " - Select the text layer in Layers.\n", - " - In the right panel, set X = 0. Or select the frame and the text, then click Align left. Tip: Cmd/Ctrl+Click cycles through stacked layers; Right‑click > Select layer also helps.\n", - "\n", - "- Photoshop:\n", - " - Lock image layer (padlock in Layers).\n", - " - Select text layer in Layers.\n", - " - Press V for Move tool. Press Ctrl+A (Cmd+A) to Select All, click Align left edges in the top options bar, then Ctrl+D (Cmd+D) to deselect. 
Alternatively, Window > Properties and set X = 0 px.\n", - "\n", - "- Illustrator:\n", - " - Lock image: Object > Lock > Selection (Ctrl+2 / Cmd+2) or lock in Layers.\n", - " - Select text object.\n", - " - Align panel: set Align To = Artboard, then Horizontal Align Left. Or Transform panel X = 0.\n", - "\n", - "- InDesign:\n", - " - Lock image (Ctrl+L / Cmd+L) or via Layers.\n", - " - Select text frame.\n", - " - Window > Object & Layout > Align. Set Align To = Page, then Align Left. Or set X = 0 in the Control/Properties panel.\n", - "\n", - "- Canva:\n", - " - Select image > Lock (padlock on toolbar).\n", - " - Select text > Position > Left. Nudge with arrow keys if needed; it will snap to the page edge.\n", - "\n", - "- PowerPoint:\n", - " - Home > Select > Selection Pane: click the eye to hide the image (or select the text from the pane).\n", - " - Shape Format > Align > Align to Slide > Align Left.\n", - "\n", - "- Google Slides:\n", - " - Arrange > Order to move im...\n", - "INFO:agent.ComputerAgent:Agent: I can’t see your design app/document open on this computer, so I can’t move the text box directly. Here’s the fastest, foolproof way to get your text box flush left without ever clicking the image again:\n", - "\n", - "Universal quick steps (work in any editor)\n", - "1) Lock or hide the image layer.\n", - "- Use the Layers/Selection panel and click the lock icon on the image layer (or right‑click > Lock).\n", - "2) Select the text box from the Layers/Selection panel (don’t click on the canvas).\n", - "3) Snap it to the left edge:\n", - "- Either set the X position to 0.\n", - "- Or use the Align Left to canvas/page/artboard command.\n", - "4) Unlock/show the image layer again if needed.\n", - "\n", - "Exact steps by app\n", - "- Figma:\n", - " - Lock image: Ctrl+Shift+L (Cmd+Shift+L on Mac) or click the lock icon in Layers.\n", - " - Select the text layer in Layers.\n", - " - In the right panel, set X = 0. 
Or select the frame and the text, then click Align left. Tip: Cmd/Ctrl+Click cycles through stacked layers; Right‑click > Select layer also helps.\n", - "\n", - "- Photoshop:\n", - " - Lock image layer (padlock in Layers).\n", - " - Select text layer in Layers.\n", - " - Press V for Move tool. Press Ctrl+A (Cmd+A) to Select All, click Align left edges in the top options bar, then Ctrl+D (Cmd+D) to deselect. Alternatively, Window > Properties and set X = 0 px.\n", - "\n", - "- Illustrator:\n", - " - Lock image: Object > Lock > Selection (Ctrl+2 / Cmd+2) or lock in Layers.\n", - " - Select text object.\n", - " - Align panel: set Align To = Artboard, then Horizontal Align Left. Or Transform panel X = 0.\n", - "\n", - "- InDesign:\n", - " - Lock image (Ctrl+L / Cmd+L) or via Layers.\n", - " - Select text frame.\n", - " - Window > Object & Layout > Align. Set Align To = Page, then Align Left. Or set X = 0 in the Control/Properties panel.\n", - "\n", - "- Canva:\n", - " - Select image > Lock (padlock on toolbar).\n", - " - Select text > Position > Left. 
Nudge with arrow keys if needed; it will snap to the page edge.\n", - "\n", - "- PowerPoint:\n", - " - Home > Select > Selection Pane: click the eye to hide the image (or select the text from the pane).\n", - " - Shape Format > Align > Align to Slide > Align Left.\n", - "\n", - "- Google Slides:\n", - " - Arrange > Order to move im...\n", - "2025-08-11 18:35:23,380 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 3277\n", - " - prompt_tokens: 1951\n", - " - total_tokens: 5228\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2624\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0352\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 3277\n", - " - prompt_tokens: 1951\n", - " - total_tokens: 5228\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2624\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0352\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5147/7340 [189:05<80:33, 27.2 steps/min]2025-08-11 18:35:24,059 - agent.ComputerAgent - INFO - LLM processing started with 33 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 33 messages\n", - "\u001b[92m18:35:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5147/7340 [189:06<80:34, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:35:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5147/7340 [189:07<80:34, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c268b680-eafe-4b8d-914a-28e5540231cd/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:35:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m18:35:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:35:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0d923fcd-4666-4869-8ad2-17460c904167/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:35:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5147/7340 [189:08<80:35, 27.2 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:35:27,111 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 76, 'y': 181}, {'x': 124, 'y': 181}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 76, 'y': 181}, {'x': 124, 'y': 181}]})\n", - "\u001b[92m18:35:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:35:27,806 - agent.ComputerAgent - INFO - Computer: click({'x': 290, 'y': 149})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 290, 'y': 149})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5148/7340 [189:09<80:32, 27.2 steps/min]2025-08-11 18:35:28,462 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m18:35:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5150/7340 [189:10<80:26, 27.2 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:35:29,639 - agent.ComputerAgent - INFO - LLM processing started with 35 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 35 messages\n", - "\u001b[92m18:35:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5150/7340 [189:13<80:28, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2350cfc0-8e2e-4302-b87c-df7d35bd6eea/invoke \"HTTP/1.1 200 OK\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5151/7340 [189:14<80:25, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:35:33,840 - agent.ComputerAgent - INFO - LLM processing started with 37 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 37 messages\n", - "\u001b[92m18:35:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89880137-9134-4973-9389-b3535802254c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5151/7340 [189:15<80:25, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 
OK\"\n", - "\u001b[92m18:35:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5151/7340 [189:16<80:26, 27.2 steps/min]\u001b[92m18:35:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:35:35,720 - agent.ComputerAgent - INFO - Computer: click({'x': 207, 'y': 488})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 207, 'y': 488})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5152/7340 [189:17<80:23, 27.2 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2350cfc0-8e2e-4302-b87c-df7d35bd6eea/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c268b680-eafe-4b8d-914a-28e5540231cd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:35:38,050 - agent.ComputerAgent - INFO - Agent: I converted the opened ODS on your Desktop to a CSV with the same base name while LibreOffice was running.\n", - "\n", - "Command used:\n", - "mkdir -p /tmp/lo-temp-profile && libreoffice --headless -env:UserInstallation=file:///tmp/lo-temp-profile --convert-to \"csv:Text - txt - csv (StarCalc):44,34,0\" --outdir ~/Desktop 
~/Desktop/file_example_ODS_5000.ods\n", - "\n", - "Notes:\n", - "- A temporary LibreOffice profile was used (-env:UserInstallation=…) so the conversion works even with the GUI instance open.\n", - "- The CSV file has been created on your Desktop as file_example_ODS_5000.csv.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I converted the opened ODS on your Desktop to a CSV with the same base name while LibreOffice was running.\n", - "\n", - "Command used:\n", - "mkdir -p /tmp/lo-temp-profile && libreoffice --headless -env:UserInstallation=file:///tmp/lo-temp-profile --convert-to \"csv:Text - txt - csv (StarCalc):44,34,0\" --outdir ~/Desktop ~/Desktop/file_example_ODS_5000.ods\n", - "\n", - "Notes:\n", - "- A temporary LibreOffice profile was used (-env:UserInstallation=…) so the conversion works even with the GUI instance open.\n", - "- The CSV file has been created on your Desktop as file_example_ODS_5000.csv.\n", - "\n", - "Task completed.\n", - "2025-08-11 18:35:38,700 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 920\n", - " - prompt_tokens: 10826\n", - " - total_tokens: 11746\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 768\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0227\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 920\n", - " - prompt_tokens: 10826\n", - " - total_tokens: 11746\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 768\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0227\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/2350cfc0-8e2e-4302-b87c-df7d35bd6eea/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c268b680-eafe-4b8d-914a-28e5540231cd/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5173/7340 [189:20<79:18, 27.3 steps/min]2025-08-11 18:35:39,393 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m18:35:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055914cd-07b0-4dcd-9407-c6975b1eccbf/invoke \"HTTP/1.1 200 OK\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5173/7340 [189:22<79:19, 27.3 steps/min]2025-08-11 18:35:41,381 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m18:35:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET 
https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:35:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5afdf327-0d8f-4749-8016-19cb1aedf273/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/reset \"HTTP/1.1 200 OK\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5173/7340 [189:24<79:20, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5174/7340 [189:25<79:18, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5afdf327-0d8f-4749-8016-19cb1aedf273/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:04, 1.67s/it]2025-08-11 18:35:45,241 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m18:35:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5182/7340 [189:26<78:53, 27.4 
steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5afdf327-0d8f-4749-8016-19cb1aedf273/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.62s/it]27.4 steps/min]2025-08-11 18:35:47,491 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:35:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 3/4 [00:04<00:01, 1.58s/it]27.4 steps/min]2025-08-11 18:35:48,319 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:35:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.33s/it]27.4 steps/min]\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5183/7340 [189:31<78:52, 27.3 
steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:35:50,250 - agent.ComputerAgent - INFO - LLM processing started with 43 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 43 messages\n", - "\u001b[92m18:35:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5183/7340 [189:32<78:52, 27.3 steps/min]\u001b[92m18:35:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:35:50,943 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 713, 'scroll_x': 0, 'x': 716, 'y': 646})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 713, 'scroll_x': 0, 'x': 716, 'y': 646})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5184/7340 [189:34<78:50, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:35<78:47, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:38<78:49, 27.3 steps/min]INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:35:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:39<78:49, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b006d7b-b853-41ed-8a84-b7eaa5b6e94b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:04, 1.66s/it]2025-08-11 18:35:59,836 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+shift+p'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+shift+p'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d05e9e78-ad03-41fc-a347-043ec46bd299/close \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.61s/it]27.3 steps/min]2025-08-11 18:36:01,386 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m18:36:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:43<78:51, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:36:02,052 - agent.ComputerAgent - INFO - LLM processing started with 36 
messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m18:36:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.40s/it]27.3 steps/min]\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:46<78:52, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:36:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5185/7340 [189:47<78:52, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:04, 1.64s/it]27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89880137-9134-4973-9389-b3535802254c/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5186/7340 [189:49<78:50, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/89880137-9134-4973-9389-b3535802254c/close \"HTTP/1.1 200 OK\"\n", - " 
71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5186/7340 [189:51<78:51, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/497d5104-1e6e-44a9-a164-fec745a337b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.35s/it]27.3 steps/min]\n", - "\u001b[92m18:36:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:36:12,164 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:36:12,165 - agent.ComputerAgent - INFO - Computer: click({'x': 16, 'y': 428})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 16, 'y': 428})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5186/7340 [189:54<78:52, 27.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5187/7340 [189:56<78:50, 27.3 steps/min]\u001b[92m18:36:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:36:15,365 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current 
computer screen.\n", - "2025-08-11 18:36:15,367 - agent.ComputerAgent - INFO - Computer: double_click({'x': 989, 'y': 650})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 989, 'y': 650})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [189:59<78:48, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:18,079 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:36:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [190:02<78:49, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:21,321 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:36:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [190:04<78:50, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:24,203 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+c'})\n", - " 
71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [190:05<78:51, 27.3 steps/min]2025-08-11 18:36:25,341 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m18:36:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [190:07<78:51, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5188/7340 [190:10<78:52, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:29,780 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'pagedown'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'pagedown'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5189/7340 [190:12<78:50, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:36:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5189/7340 [190:13<78:51, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.58s/it]27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b006d7b-b853-41ed-8a84-b7eaa5b6e94b/invoke \"HTTP/1.1 200 OK\"\n", - " 
71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5189/7340 [190:17<78:53, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 3/4 [00:04<00:01, 1.56s/it]\u001b[92m18:36:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.31s/it]27.3 steps/min]\n", - "2025-08-11 18:36:37,871 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m18:36:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:36:39,406 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+shift+p'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+shift+p'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5189/7340 [190:21<78:54, 27.3 steps/min]\u001b[92m18:36:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:36:40,047 - agent.ComputerAgent - INFO - Computer: click({'x': 
15, 'y': 142})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 142})\n", - "\u001b[92m18:36:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:36:40,733 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 629})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 629})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5189/7340 [190:22<78:54, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0d923fcd-4666-4869-8ad2-17460c904167/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:23<78:18, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0d923fcd-4666-4869-8ad2-17460c904167/close \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:24<78:18, 27.3 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:26<78:19, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:46,172 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:36:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:36:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055914cd-07b0-4dcd-9407-c6975b1eccbf/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:28<78:20, 27.3 steps/min]2025-08-11 18:36:47,482 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m18:36:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:30<78:21, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3df65c5-9d1c-44fd-b9bb-37f1f0cd64dc/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:32<78:21, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 
71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:33<78:22, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/b3df65c5-9d1c-44fd-b9bb-37f1f0cd64dc/reset \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 3/4 [00:04<00:01, 1.57s/it]27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/497d5104-1e6e-44a9-a164-fec745a337b6/reset \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.32s/it]\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:36:55,357 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3df65c5-9d1c-44fd-b9bb-37f1f0cd64dc/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5201/7340 [190:37<78:23, 27.3 steps/min]2025-08-11 18:36:56,039 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:36:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:36:56 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:36:56,709 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 387})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 387})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/497d5104-1e6e-44a9-a164-fec745a337b6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/reset \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5202/7340 [190:38<78:21, 27.3 steps/min]2025-08-11 18:36:57,382 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:36:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:36:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5203/7340 [190:39<78:18, 27.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP 
Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:36:58,641 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:36:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:36:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:36:59,325 - agent.ComputerAgent - INFO - Computer: click({'x': 368, 'y': 561})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 368, 'y': 561})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5203/7340 [190:41<78:19, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:00,501 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:37:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5204/7340 [190:42<78:16, 27.3 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/af5d4b08-d761-4bf8-a2c7-7ce16ed98ab9/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5204/7340 [190:43<78:16, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:03,933 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:37:03,935 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'meta'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'meta'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5204/7340 [190:45<78:17, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:37:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:05,291 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started 
with 10 messages\n", - "\u001b[92m18:37:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:06,012 - agent.ComputerAgent - INFO - Computer: click({'x': 524, 'y': 503})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 524, 'y': 503})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055914cd-07b0-4dcd-9407-c6975b1eccbf/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5205/7340 [190:47<78:15, 27.3 steps/min]\u001b[92m18:37:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:06,653 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:37:06,654 - agent.ComputerAgent - INFO - Computer: click({'x': 13, 'y': 41})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 13, 'y': 41})\n", - "2025-08-11 18:37:07,313 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 
messages\n", - "\u001b[92m18:37:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:08,648 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:37:08,649 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'win'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'win'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5206/7340 [190:50<78:13, 27.3 steps/min]2025-08-11 18:37:09,323 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m18:37:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5208/7340 [190:55<78:09, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b006d7b-b853-41ed-8a84-b7eaa5b6e94b/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5208/7340 [190:56<78:09, 27.3 steps/min]2025-08-11 18:37:15,613 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m18:37:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3df65c5-9d1c-44fd-b9bb-37f1f0cd64dc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:openai._base_client:Retrying request to /chat/completions in 0.403242 seconds\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5208/7340 [190:57<78:10, 27.3 steps/min]2025-08-11 18:37:16,773 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:37:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:18,458 - agent.ComputerAgent - INFO - Computer: type({'text': 'drive.google.com'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'drive.google.com'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5208/7340 [191:00<78:11, 27.3 steps/min]2025-08-11 18:37:19,460 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:37:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5209/7340 [191:01<78:08, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5209/7340 [191:02<78:09, 27.3 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:21,836 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:37:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5209/7340 [191:03<78:09, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:22,505 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:37:22,506 - agent.ComputerAgent - INFO - Computer: click({'x': 1006, 'y': 9})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1006, 'y': 9})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5209/7340 
[191:04<78:10, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:24,905 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+v'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+v'})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5210/7340 [191:06<78:07, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:26,064 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m18:37:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:37:26,733 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5210/7340 [191:08<78:08, 27.3 steps/min]\u001b[92m18:37:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:28 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:37:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/497d5104-1e6e-44a9-a164-fec745a337b6/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5210/7340 [191:10<78:09, 27.3 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m18:37:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:29,395 - agent.ComputerAgent - INFO - Computer: click({'x': 16, 'y': 429})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 16, 'y': 429})\n", - "\u001b[92m18:37:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:30,066 - agent.ComputerAgent - INFO - Computer: click({'x': 18, 'y': 45})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 18, 'y': 45})\n", - "2025-08-11 18:37:30,744 - 
agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m18:37:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5210/7340 [191:12<78:10, 27.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:31,387 - agent.ComputerAgent - INFO - Computer: click({'x': 18, 'y': 239})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 18, 'y': 239})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5212/7340 [191:13<78:04, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5213/7340 [191:14<78:01, 27.3 steps/min]\u001b[92m18:37:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "2025-08-11 18:37:33,218 - agent.ComputerAgent - INFO - Computer: click({'x': 49, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 49, 'y': 53})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:34,578 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c1b31663-de2f-4fd6-a091-28bf62a74f86/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5214/7340 [191:16<77:59, 27.3 steps/min]INFO:openai._base_client:Retrying request to /chat/completions in 0.421017 seconds\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5215/7340 [191:17<77:56, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:36,730 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:37:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b3df65c5-9d1c-44fd-b9bb-37f1f0cd64dc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5215/7340 [191:18<77:57, 27.3 steps/min]2025-08-11 18:37:37,733 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 
messages\n", - "\u001b[92m18:37:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5215/7340 [191:19<77:57, 27.3 steps/min]2025-08-11 18:37:38,404 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:37:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5215/7340 [191:21<77:58, 27.3 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/7a6ead00-3730-4f34-9acb-3c8109ec140a/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:40,395 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m18:37:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:41,082 - agent.ComputerAgent - INFO - Computer: click({'x': 605, 'y': 527})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 605, 'y': 527})\n", - "2025-08-11 18:37:41,718 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m18:37:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5215/7340 [191:23<77:59, 27.2 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:42,379 - agent.ComputerAgent - INFO - Computer: click({'x': 525, 'y': 502})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'x': 525, 'y': 502})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5216/7340 [191:25<77:56, 27.2 steps/min]\u001b[92m18:37:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:37:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:44,917 - agent.ComputerAgent - INFO - Computer: click({'x': 652, 'y': 139})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 652, 'y': 139})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5217/7340 [191:26<77:54, 27.3 steps/min]\u001b[92m18:37:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:45,530 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': -1169, 
'scroll_x': 0, 'x': 526, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': -1169, 'scroll_x': 0, 'x': 526, 'y': 427})\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5219/7340 [191:29<77:49, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3077c8ef-543a-4fa8-b46c-49b632230eed/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5219/7340 [191:30<77:49, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9b006d7b-b853-41ed-8a84-b7eaa5b6e94b/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:50,126 - agent.ComputerAgent - INFO - Computer: type({'text': 'Thunderbird'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Thunderbird'})\n", - "\u001b[92m18:37:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/1064657b-b89a-4eeb-8197-1c110af6b752/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5219/7340 [191:31<77:50, 
27.2 steps/min]2025-08-11 18:37:50,787 - agent.ComputerAgent - INFO - Computer: click({'x': 749, 'y': 440})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 749, 'y': 440})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/055914cd-07b0-4dcd-9407-c6975b1eccbf/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:37:51,454 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m18:37:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3077c8ef-543a-4fa8-b46c-49b632230eed/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5220/7340 [191:34<77:48, 27.2 steps/min]\u001b[92m18:37:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:53,444 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - 
"\u001b[92m18:37:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:37:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:37:54,116 - agent.ComputerAgent - INFO - Computer: click({'x': 1008, 'y': 10})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1008, 'y': 10})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a72854f0-3bb0-4711-a18e-7a467a56390e/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/3077c8ef-543a-4fa8-b46c-49b632230eed/close \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5223/7340 [191:35<77:39, 27.3 steps/min]\u001b[92m18:37:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:55,819 - agent.ComputerAgent - INFO - Computer: click({'x': 80, 'y': 430})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 80, 'y': 430})\n", - "2025-08-11 18:37:56,505 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:37:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP 
Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1fef1c7a-93ef-4a63-b067-399dfc4ff08a/invoke \"HTTP/1.1 200 OK\"\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5224/7340 [191:38<77:37, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:37:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:37:57,824 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m18:37:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ------------| 5225/7340 [191:39<77:34, 27.3 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/e42da596-e101-4fd3-9dea-8a1d63615dad/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af5d4b08-d761-4bf8-a2c7-7ce16ed98ab9/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Settings > On startup.\n", - "- Select β€œOpen the New Tab page,” or β€œContinue where you left off.”\n", - "- If β€œOpen a specific page or set of pages” is selected, remove funbrain.com (click the three dots beside it > Remove).\n", - "- Also check Settings > Appearance > Show home button. 
If enabled, set it to β€œNew Tab page” and make sure funbrain.com isn’t listed there.\n", - "\n", - "2) Unpin any pinned tab\n", - "- If you see a tiny pin icon on the funbrain tab, right‑click the tab > Unpin. Close it.\n", - "\n", - "3) Remove any extension hijacking your homepage/new tab\n", - "- Go to chrome://extensions\n", - "- Toggle off or Remove any unknown extensions, especially ones referencing β€œNew Tab,” β€œHome,” β€œSearch,” or anything you don’t recognize.\n", - "- Restart Chrome.\n", - "\n", - "4) Check if Chrome is β€œManaged by your organization” (policy hijack)\n", - "- Go to chrome://policy\n", - "- If you see policies like HomepageLocation, NewTabPageLocation, RestoreOnStartup, or StartupURLs pointing to funbrain.com (and this is your personal device), remove the policies:\n", - " - Press Windows key, type β€œregedit”, open Registry Editor.\n", - " - Navigate to:\n", - " - HKEY_CURRENT_USER\\Software\\Policies\\Google\\Chrome\n", - " - HKEY_LOCAL_MACHINE\\SOFTWARE\\Policies\\Google\\Chrome\n", - " - Delete values like HomepageLocation, StartupURLs, RestoreOnStartup, NewTabPageLocation, or delete the Chrome key entirely if it only contains unwanted values.\n", - " - Easier method (PowerShell as Administrator):\n", - " Remove-Item -Path \"HKCU:\\Software\\Policies\\Google\\Chrome\" -Recurse -Force -ErrorAction SilentlyContinue\n", - " Remove-Item -Path \"HKLM:\\SOFTWARE\\Policies\\Google\\Chrome\" -Recurse -Force -ErrorAction SilentlyContinue\n", - " - Reboot and recheck chrome://policy (it should be empty or unrelated).\n", - "\n", - "5) Fix the shortcut that launches Chrome\n", - "- Right‑click the Chrome shortcut you normall...\n", - "INFO:agent.ComputerAgent:Agent: This isn’t a cache issue. Chrome is being told to open that site by a setting, an extension/policy, or a modified shortcut. Do the steps below in order (quickest first). 
They’re written for Windows on your Surface Pro.\n", - "\n", - "1) Fix Chrome’s startup settings\n", - "- Open Chrome.\n", - "- Menu (three dots) > Settings > On startup.\n", - "- Select β€œOpen the New Tab page,” or β€œContinue where you left off.”\n", - "- If β€œOpen a specific page or set of pages” is selected, remove funbrain.com (click the three dots beside it > Remove).\n", - "- Also check Settings > Appearance > Show home button. If enabled, set it to β€œNew Tab page” and make sure funbrain.com isn’t listed there.\n", - "\n", - "2) Unpin any pinned tab\n", - "- If you see a tiny pin icon on the funbrain tab, right‑click the tab > Unpin. Close it.\n", - "\n", - "3) Remove any extension hijacking your homepage/new tab\n", - "- Go to chrome://extensions\n", - "- Toggle off or Remove any unknown extensions, especially ones referencing β€œNew Tab,” β€œHome,” β€œSearch,” or anything you don’t recognize.\n", - "- Restart Chrome.\n", - "\n", - "4) Check if Chrome is β€œManaged by your organization” (policy hijack)\n", - "- Go to chrome://policy\n", - "- If you see policies like HomepageLocation, NewTabPageLocation, RestoreOnStartup, or StartupURLs pointing to funbrain.com (and this is your personal device), remove the policies:\n", - " - Press Windows key, type β€œregedit”, open Registry Editor.\n", - " - Navigate to:\n", - " - HKEY_CURRENT_USER\\Software\\Policies\\Google\\Chrome\n", - " - HKEY_LOCAL_MACHINE\\SOFTWARE\\Policies\\Google\\Chrome\n", - " - Delete values like HomepageLocation, StartupURLs, RestoreOnStartup, NewTabPageLocation, or delete the Chrome key entirely if it only contains unwanted values.\n", - " - Easier method (PowerShell as Administrator):\n", - " Remove-Item -Path \"HKCU:\\Software\\Policies\\Google\\Chrome\" -Recurse -Force -ErrorAction SilentlyContinue\n", - " Remove-Item -Path \"HKLM:\\SOFTWARE\\Policies\\Google\\Chrome\" -Recurse -Force -ErrorAction SilentlyContinue\n", - " - Reboot and recheck chrome://policy (it should be empty or 
unrelated).\n", - "\n", - "5) Fix the shortcut that launches Chrome\n", - "- Right‑click the Chrome shortcut you normall...\n", - "2025-08-11 18:49:11,237 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 2424\n", - " - prompt_tokens: 1953\n", - " - total_tokens: 4377\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1600\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0267\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 2424\n", - " - prompt_tokens: 1953\n", - " - total_tokens: 4377\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1600\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0267\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/497d5104-1e6e-44a9-a164-fec745a337b6/invoke \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5568/7340 [202:52<64:34, 27.4 steps/min]2025-08-11 18:49:11,903 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m18:49:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 18:49:12,597 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m18:49:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0ce9d177-2b9a-4fde-a8a5-eb1b59248c8f/invoke \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5568/7340 [202:54<64:34, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:49:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:49:13,963 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m18:49:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5568/7340 [202:55<64:34, 27.4 steps/min]\u001b[92m18:49:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:49:14,626 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 18, 'y': 237})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 18, 'y': 237})\n", - "2025-08-11 18:49:16,032 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 
messages\n", - "\u001b[92m18:49:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8a9dd85a-f951-495e-aea0-d3864853591e/invoke \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5568/7340 [202:57<64:35, 27.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:49:17,933 - agent.ComputerAgent - INFO - Computer: type({'text': 'ICLR 2013 location'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'ICLR 2013 location'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:49:19,279 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5569/7340 [203:01<64:33, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8a9dd85a-f951-495e-aea0-d3864853591e/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:49:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:49:21,241 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - 
"INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 18:49:21,241 - agent.ComputerAgent - INFO - Agent: Yes. VLC is set by default to reuse the first instance when you open a file (especially from the file manager). Turn that off and you can run multiple, independent players.\n", - "\n", - "Do this:\n", - "- Open VLC.\n", - "- Go to Tools -> Preferences (Ctrl+P).\n", - "- In the Simple settings view on the Interface tab, find Playlist and Instances.\n", - "- Uncheck:\n", - " - Allow only one instance\n", - " - Use only one instance when started from file manager\n", - " - Enqueue items into playlist in one instance (if you see it)\n", - "- Click Save.\n", - "- Quit all VLC windows completely (Ctrl+Q) so the setting takes effect, then start VLC again and open a different file in each window.\n", - "\n", - "Command-line alternative (Windows/macOS/Linux):\n", - "- Start each player with:\n", - " - vlc --no-one-instance --no-one-instance-when-started-from-file-manager file1.mp4\n", - " - vlc --no-one-instance file2.mp4\n", - "\n", - "After this, each VLC window will play its own video simultaneously.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: Yes. VLC is set by default to reuse the first instance when you open a file (especially from the file manager). 
Turn that off and you can run multiple, independent players.\n", - "\n", - "Do this:\n", - "- Open VLC.\n", - "- Go to Tools -> Preferences (Ctrl+P).\n", - "- In the Simple settings view on the Interface tab, find Playlist and Instances.\n", - "- Uncheck:\n", - " - Allow only one instance\n", - " - Use only one instance when started from file manager\n", - " - Enqueue items into playlist in one instance (if you see it)\n", - "- Click Save.\n", - "- Quit all VLC windows completely (Ctrl+Q) so the setting takes effect, then start VLC again and open a different file in each window.\n", - "\n", - "Command-line alternative (Windows/macOS/Linux):\n", - "- Start each player with:\n", - " - vlc --no-one-instance --no-one-instance-when-started-from-file-manager file1.mp4\n", - " - vlc --no-one-instance file2.mp4\n", - "\n", - "After this, each VLC window will play its own video simultaneously.\n", - "\n", - "Task completed\n", - "2025-08-11 18:49:21,909 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 1307\n", - " - prompt_tokens: 1975\n", - " - total_tokens: 3282\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1088\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0155\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 1307\n", - " - prompt_tokens: 1975\n", - " - total_tokens: 3282\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1088\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0155\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/invoke \"HTTP/1.1 200 OK\"\n", - 
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m18:49:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5590/7340 [203:04<63:34, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/35ee5a0d-d8f9-4419-b253-d394f35fe993/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:49:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m18:49:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/0835b0ab-8369-4209-a85b-955dbb271b54/reset \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:49:23,196 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m18:49:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m18:49:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 18:49:23,915 - agent.ComputerAgent - INFO - Computer: click({'x': 109, 'y': 125})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 109, 'y': 125})\n", - "\u001b[92m18:49:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8a9dd85a-f951-495e-aea0-d3864853591e/close \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/655a0f34-fb5e-49f8-9a65-531af668d6c6/close \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5591/7340 [203:05<63:31, 27.5 steps/min]2025-08-11 18:49:24,590 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 75, 'y': 177}, {'x': 278, 'y': 177}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 75, 'y': 177}, {'x': 278, 'y': 177}]})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa800986-7030-4845-b4a1-82119abb97e9/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/84265bb9-b6f6-479e-8a58-920cfa2b7c69/invoke \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5592/7340 [203:07<63:29, 27.5 steps/min]INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET 
https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5600/7340 [203:08<63:07, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:49:28,277 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0835b0ab-8369-4209-a85b-955dbb271b54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/a72854f0-3bb0-4711-a18e-7a467a56390e/invoke \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5600/7340 [203:10<63:07, 27.6 steps/min]2025-08-11 18:49:28,957 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m18:49:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:49:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 18:49:30,327 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m18:49:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m18:49:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/84265bb9-b6f6-479e-8a58-920cfa2b7c69/close \"HTTP/1.1 200 OK\"\n", - " 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----------| 5600/7340 [203:12<63:08, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1064657b-b89a-4eeb-8197-1c110af6b752/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/584f1ba5-3dc8-4b11-9242-7100c4e1133e/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/aa800986-7030-4845-b4a1-82119abb97e9/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 18:49:32,369 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m18:49:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 log.txt\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'cd ~/Desktop\\npython3 calculator.py > log.txt\\n'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/10c454d7-987f-4a23-83d6-534bd9ba42c2/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 83%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-------| 6089/7340 [220:38<45:19, 27.6 steps/min]\u001b[92m19:06:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b641dbb7-3e3c-437d-bc11-5e038171855d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/43a383a0-163d-4a8b-8494-0e1d1eab6cd6/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/27fc4825-1617-494a-9308-b128bd8af05a/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:06:57,016 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:06:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:06:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/80d19a15-b1ca-43cc-8d1b-1f86242172b5/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 Modify > Font > choose Liberation Sans Narrow > OK.\n", - "- Drawing styles (for manual text boxes):\n", - " - Click the Drawing Styles icon.\n", - " - Right‑click Default > Modify > Font > Liberation Sans 
Narrow > OK.\n", - "- Remove any direct overrides so objects follow the styles:\n", - " - In the Slides pane, select all slides (click first, Shift‑click last).\n", - " - Press Ctrl+M (Format > Clear Direct Formatting). This makes existing text boxes pick up the new style font.\n", - "\n", - "Method 2: One‑click change with a macro (works even if text has direct formatting)\n", - "- Tools > Macros > Organize Macros > LibreOffice Basic > select your presentation > New.\n", - "- Paste this macro into the new module and Run:\n", - "\n", - "Sub SetAllTextToLiberationNarrow\n", - " Dim oDoc As Object, oSlides As Object, oSlide As Object\n", - " Dim oShape As Object\n", - " oDoc = ThisComponent\n", - " oSlides = oDoc.getDrawPages()\n", - " Dim i As Long, j As Long\n", - " For i = 0 To oSlides.getCount() - 1\n", - " oSlide = oSlides.getByIndex(i)\n", - " For j = 0 To oSlide.getCount() - 1\n", - " oShape = oSlide.getByIndex(j)\n", - " Call SetFontRecursive(oShape)\n", - " Next j\n", - " Next i\n", - "End Sub\n", - "\n", - "Sub SetFontRecursive(oShape As Object)\n", - " If oShape.supportsService(\"com.sun.star.drawing.GroupShape\") Then\n", - " Dim k As Long\n", - " For k = 0 To oShape.getCount() - 1\n", - " Call SetFontRecursive(oShape.getByIndex(k))\n", - " Next k\n", - " ElseIf oShape.supportsService(\"com.sun.star.drawing.Text\") Then\n", - " Dim oText As Object, oCur As Object\n", - " oText = oShape.getText()\n", - " oCur = oText.createTextCursor()\n", - " oCur.CharFontName = \"Liberation Sans Narrow\"\n", - " oCur.CharF...\n", - "INFO:agent.ComputerAgent:Agent: Yesβ€”here are reliable ways to standardize every text box to Liberation Sans Narrow in LibreOffice Impress without clicking each one.\n", - "\n", - "Method 1 (recommended): Change styles so all objects inherit the font\n", - "- Open your presentation.\n", - "- Press F11 to open the Styles deck.\n", - "- Presentation styles (for placeholders):\n", - " - Click the Presentation Styles icon.\n", - " - Right‑click 
each of Title, Subtitle, Outline 1–10 (or Outline), Notes, etc. > Modify > Font > choose Liberation Sans Narrow > OK.\n", - "- Drawing styles (for manual text boxes):\n", - " - Click the Drawing Styles icon.\n", - " - Right‑click Default > Modify > Font > Liberation Sans Narrow > OK.\n", - "- Remove any direct overrides so objects follow the styles:\n", - " - In the Slides pane, select all slides (click first, Shift‑click last).\n", - " - Press Ctrl+M (Format > Clear Direct Formatting). This makes existing text boxes pick up the new style font.\n", - "\n", - "Method 2: One‑click change with a macro (works even if text has direct formatting)\n", - "- Tools > Macros > Organize Macros > LibreOffice Basic > select your presentation > New.\n", - "- Paste this macro into the new module and Run:\n", - "\n", - "Sub SetAllTextToLiberationNarrow\n", - " Dim oDoc As Object, oSlides As Object, oSlide As Object\n", - " Dim oShape As Object\n", - " oDoc = ThisComponent\n", - " oSlides = oDoc.getDrawPages()\n", - " Dim i As Long, j As Long\n", - " For i = 0 To oSlides.getCount() - 1\n", - " oSlide = oSlides.getByIndex(i)\n", - " For j = 0 To oSlide.getCount() - 1\n", - " oShape = oSlide.getByIndex(j)\n", - " Call SetFontRecursive(oShape)\n", - " Next j\n", - " Next i\n", - "End Sub\n", - "\n", - "Sub SetFontRecursive(oShape As Object)\n", - " If oShape.supportsService(\"com.sun.star.drawing.GroupShape\") Then\n", - " Dim k As Long\n", - " For k = 0 To oShape.getCount() - 1\n", - " Call SetFontRecursive(oShape.getByIndex(k))\n", - " Next k\n", - " ElseIf oShape.supportsService(\"com.sun.star.drawing.Text\") Then\n", - " Dim oText As Object, oCur As Object\n", - " oText = oShape.getText()\n", - " oCur = oText.createTextCursor()\n", - " oCur.CharFontName = \"Liberation Sans Narrow\"\n", - " oCur.CharF...\n", - "2025-08-11 19:26:25,833 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 3294\n", - " - prompt_tokens: 1964\n", - " - total_tokens: 5258\n", - " - 
completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2560\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0354\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 3294\n", - " - prompt_tokens: 1964\n", - " - total_tokens: 5258\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2560\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0354\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f055f32-c606-4a58-91bc-c5990d4e8280/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f209659e-2ed9-40fa-ae61-5359fb9ef290/invoke \"HTTP/1.1 200 OK\"\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6611/7340 [240:07<26:28, 27.5 steps/min]2025-08-11 19:26:26,517 - agent.ComputerAgent - INFO - Computer: double_click({'x': 884, 'y': 123})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 884, 'y': 123})\n", - "2025-08-11 19:26:27,223 - agent.ComputerAgent - INFO - Computer: click({'x': 344, 'y': 34})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 344, 'y': 34})\n", - "2025-08-11 19:26:27,897 - agent.ComputerAgent - INFO - Computer: click({'x': 548, 'y': 249})\n", - 
"INFO:agent.ComputerAgent:Computer: click({'x': 548, 'y': 249})\n", - "2025-08-11 19:26:28,563 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:26:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6613/7340 [240:10<26:24, 27.5 steps/min]2025-08-11 19:26:29,242 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:26:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:30,582 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'f1'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'f1'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:31,909 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6616/7340 [240:13<26:17, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:33,178 - agent.ComputerAgent - INFO - Computer: screenshot({})\n", - "INFO:agent.ComputerAgent:Computer: screenshot({})\n", - " 
90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6618/7340 [240:14<26:12, 27.5 steps/min]2025-08-11 19:26:33,810 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:26:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:26:34,481 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:26:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/09c933ad-61bf-4498-b248-0df86e3aea78/invoke \"HTTP/1.1 200 OK\"\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6619/7340 [240:16<26:10, 27.5 steps/min]2025-08-11 19:26:35,113 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:26:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:26:35,982 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:26:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/7f4008ee-6c98-4905-9ade-965ea7842b64/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6619/7340 [240:17<26:10, 27.5 steps/min]2025-08-11 19:26:37,325 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:26:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/18debd9e-6c58-4504-8a04-13cba683a254/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6619/7340 [240:19<26:10, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2d8a6e51-acdb-47b9-8ee4-f3085c741fd5/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:26:38,682 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:26:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/bcec4523-df7a-48b5-aea1-8d7c632a6dc4/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51954fb4-34ed-4511-b2fd-a6169b5ea5d3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9a544504-3e48-48b2-8429-0a97e266ebfb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/84a5d283-63f1-43fc-b483-76116d67f385/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:39,380 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:26:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:26:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/775a5b67-2406-42b8-86e5-243e01b8dc27/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/052ac585-1998-46b2-9ac5-0dc192aeba02/invoke \"HTTP/1.1 200 OK\"\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6619/7340 [240:21<26:10, 27.5 steps/min]" - ] - }, - { - "name": "stdout", - 
"output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 19:26:40,063 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:26:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:26:40,762 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 344, 'y': 137})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 344, 'y': 137})\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6619/7340 [240:22<26:11, 27.5 steps/min]2025-08-11 19:26:41,401 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:26:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:26:42,073 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:26:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:26:42,723 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:26:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6620/7340 [240:24<26:08, 27.5 steps/min]2025-08-11 19:26:43,402 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:26:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:26:45,407 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6620/7340 [240:29<26:09, 27.5 steps/min]\u001b[92m19:26:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:26:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:26:48,083 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:26:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:26:48,753 - agent.ComputerAgent - INFO - Computer: click({'x': 914, 'y': 660})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 914, 'y': 660})\n", - "\u001b[92m19:26:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 
90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6621/7340 [240:31<26:07, 27.5 steps/min]\u001b[92m19:26:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:26:50,093 - agent.ComputerAgent - INFO - Computer: click({'x': 641, 'y': 498})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 641, 'y': 498})\n", - "\u001b[92m19:26:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:26:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:26:50,755 - agent.ComputerAgent - INFO - Computer: click({'x': 893, 'y': 134})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 893, 'y': 134})\n", - "2025-08-11 19:26:51,455 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:26:51,456 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 386, 'y': 250})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 386, 'y': 250})\n", - "\u001b[92m19:26:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6622/7340 [240:33<26:04, 27.5 steps/min]2025-08-11 19:26:52,153 - agent.ComputerAgent - INFO - Computer: click({'x': 173, 'y': 150})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 173, 'y': 150})\n", - "2025-08-11 19:26:52,821 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:26:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6626/7340 [240:35<25:55, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6626/7340 [240:36<25:55, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77892268-14f2-4dfa-b58c-6a682f258679/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:26:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", 
- "\u001b[92m19:26:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6626/7340 [240:37<25:55, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:26:56,855 - agent.ComputerAgent - INFO - Computer: click({'x': 46, 'y': 528})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 46, 'y': 528})\n", - "\u001b[92m19:26:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:26:57,527 - agent.ComputerAgent - INFO - Computer: click({'x': 731, 'y': 617})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 731, 'y': 617})\n", - "2025-08-11 19:26:58,183 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:26:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6626/7340 [240:39<25:56, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f055f32-c606-4a58-91bc-c5990d4e8280/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:58,873 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:26:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/73c70c0d-c1a0-401f-83c0-063e983abd6c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f209659e-2ed9-40fa-ae61-5359fb9ef290/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:26:59,543 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:26:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:27:00,884 - agent.ComputerAgent - INFO - Computer: type({'text': '20'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '20'})\n", - " 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6628/7340 [240:42<25:51, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f4008ee-6c98-4905-9ade-965ea7842b64/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:27:01,553 - agent.ComputerAgent - INFO - LLM 
processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:27:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:27:02,260 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:27:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:27:02,945 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:27:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f4008ee-6c98-4905-9ade-965ea7842b64/close \"HTTP/1.1 200 OK\"\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6648/7340 [240:44<25:03, 27.6 steps/min]2025-08-11 19:27:04,256 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:27:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6648/7340 [240:46<25:03, 27.6 steps/min]INFO:httpx:HTTP Request: GET 
https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/2d8a6e51-acdb-47b9-8ee4-f3085c741fd5/invoke \"HTTP/1.1 200 OK\"\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6648/7340 [240:47<25:03, 27.6 steps/min]2025-08-11 19:27:05,962 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:27:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6648/7340 [240:48<25:03, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:27:07,803 - agent.ComputerAgent - INFO - Computer: type({'text': 'Extensions: Install from VSIX'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Extensions: Install from VSIX'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/775a5b67-2406-42b8-86e5-243e01b8dc27/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:27:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:27:09,824 - agent.ComputerAgent - INFO - Computer: get_current_url({})\n", - "INFO:agent.ComputerAgent:Computer: 
get_current_url({})\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6648/7340 [240:51<25:04, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/18debd9e-6c58-4504-8a04-13cba683a254/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 View > Folders > Unified.\n", - "- In the left sidebar you’ll now see a β€œUnified Folders” section with a single β€œInbox” that aggregates all accounts.\n", - "\n", - "Optional: choose exactly which accounts/folders are included\n", - "- In the left sidebar, under Unified Folders, right‑click Inbox > Properties.\n", - "- Click β€œSelect the folders to search,” then check the Inbox for each account you want included. Click Update.\n", - "\n", - "Notes and tips\n", - "- Thunderbird remembers this view; if it ever changes, repeat View > Folders > Unified.\n", - "- If you use POP and want all mail to physically go to one inbox, set a Global Inbox: Account Settings > your POP account > Server Settings > Advanced > Global Inbox (Local Folders).\n", - "- If a folder is missing from the Unified view, right‑click that folder > Subscribe (for IMAP) to ensure it’s subscribed, and confirm it’s checked in the Unified Inbox Properties.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: Here’s how to set up a unified inbox in Thunderbird so all your accounts’ emails show in one place:\n", - "\n", - "- Open Thunderbird.\n", - "- Go to the application menu (three horizontal lines in the top-right) > View > Folders > Unified.\n", - "- In the left sidebar you’ll now see a β€œUnified Folders” section with a single β€œInbox” that aggregates all accounts.\n", - "\n", - "Optional: choose exactly which accounts/folders are included\n", - "- 
In the left sidebar, under Unified Folders, right‑click Inbox > Properties.\n", - "- Click β€œSelect the folders to search,” then check the Inbox for each account you want included. Click Update.\n", - "\n", - "Notes and tips\n", - "- Thunderbird remembers this view; if it ever changes, repeat View > Folders > Unified.\n", - "- If you use POP and want all mail to physically go to one inbox, set a Global Inbox: Account Settings > your POP account > Server Settings > Advanced > Global Inbox (Local Folders).\n", - "- If a folder is missing from the Unified view, right‑click that folder > Subscribe (for IMAP) to ensure it’s subscribed, and confirm it’s checked in the Unified Inbox Properties.\n", - "\n", - "Task completed\n", - "Loading checkpoint shards: 25%|β–ˆβ–ˆβ–Œ | 1/4 [00:01<00:05, 1.72s/it]2025-08-11 19:29:25,906 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 1266\n", - " - prompt_tokens: 12578\n", - " - total_tokens: 13844\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1024\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 11776\n", - " - response_cost: $0.0151\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 1266\n", - " - prompt_tokens: 12578\n", - " - total_tokens: 13844\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 1024\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 11776\n", - " - response_cost: $0.0151\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:29:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.67s/it]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:29:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6708/7340 [243:09<22:54, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 3/4 [00:04<00:01, 1.64s/it]2025-08-11 19:29:29,309 - agent.ComputerAgent - INFO - Computer: type({'text': 'sar -V\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'sar -V\\n'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.38s/it]\n", - "\u001b[92m19:29:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 
91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6709/7340 [243:11<22:52, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6710/7340 [243:13<22:50, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/73c70c0d-c1a0-401f-83c0-063e983abd6c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f055f32-c606-4a58-91bc-c5990d4e8280/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:32,337 - agent.ComputerAgent - INFO - LLM processing started with 29 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 29 messages\n", - "\u001b[92m19:29:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:33,023 - agent.ComputerAgent - INFO 
- Computer: click({'x': 115, 'y': 635})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 115, 'y': 635})\n", - "\u001b[92m19:29:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6710/7340 [243:14<22:50, 27.6 steps/min]\u001b[92m19:29:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:33,677 - agent.ComputerAgent - INFO - Computer: click({'x': 28, 'y': 739})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 28, 'y': 739})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:34,335 - agent.ComputerAgent - INFO - Computer: click({'x': 530, 'y': 417})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 530, 'y': 417})\n", - "\u001b[92m19:29:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:34,988 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - 
"INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:29:34,989 - agent.ComputerAgent - INFO - Computer: click({'x': 80, 'y': 181})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 80, 'y': 181})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6711/7340 [243:17<22:48, 27.6 steps/min]\u001b[92m19:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/775a5b67-2406-42b8-86e5-243e01b8dc27/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/18debd9e-6c58-4504-8a04-13cba683a254/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/77892268-14f2-4dfa-b58c-6a682f258679/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:36,366 - agent.ComputerAgent - INFO - Computer: click({'x': 186, 'y': 148})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 186, 'y': 148})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:29:36,994 - agent.ComputerAgent - INFO - Computer: click({'x': 85, 'y': 234})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 85, 'y': 234})\n", - "\u001b[92m19:29:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:29:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 91%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6714/7340 [243:18<22:41, 27.6 steps/min]2025-08-11 19:29:37,669 - agent.ComputerAgent - INFO - Computer: click({'x': 483, 'y': 267})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 483, 'y': 267})\n", - "2025-08-11 19:29:38,353 - agent.ComputerAgent - INFO - Computer: click({'x': 974, 'y': 34})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 974, 'y': 34})\n", - "2025-08-11 19:29:38,999 - agent.ComputerAgent - 
INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:29:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:29:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6717/7340 [243:20<22:34, 27.6 steps/min]2025-08-11 19:29:39,730 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 914, 'y': 671}, {'x': 984, 'y': 467}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 914, 'y': 671}, {'x': 984, 'y': 467}]})\n", - "2025-08-11 19:29:40,390 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:29:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6719/7340 [243:22<22:29, 27.6 steps/min]2025-08-11 19:29:41,077 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:29:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "2025-08-11 19:29:42,161 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:29:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/73c70c0d-c1a0-401f-83c0-063e983abd6c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f055f32-c606-4a58-91bc-c5990d4e8280/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/18debd9e-6c58-4504-8a04-13cba683a254/close \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6720/7340 [243:23<22:27, 27.6 steps/min]2025-08-11 19:29:42,846 - agent.ComputerAgent - INFO - LLM processing started with 31 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 31 messages\n", - "\u001b[92m19:29:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6725/7340 [243:26<22:15, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:29:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6725/7340 [243:27<22:15, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/052ac585-1998-46b2-9ac5-0dc192aeba02/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f055f32-c606-4a58-91bc-c5990d4e8280/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/963f0b0a-47d1-479c-9077-6c59023108fe/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/84a5d283-63f1-43fc-b483-76116d67f385/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f209659e-2ed9-40fa-ae61-5359fb9ef290/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/bcec4523-df7a-48b5-aea1-8d7c632a6dc4/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9a544504-3e48-48b2-8429-0a97e266ebfb/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6725/7340 [243:28<22:15, 27.6 steps/min]2025-08-11 19:29:48,280 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM 
processing started with 6 messages\n", - "\u001b[92m19:29:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00 System_Resources_Report.txt\\n'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'sar -u 1 30 > System_Resources_Report.txt\\n'})\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6784/7340 [245:45<20:08, 27.6 steps/min]2025-08-11 19:32:03,959 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:32:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:04,639 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:32:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:46<20:06, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7633715b-dde0-4c56-a4b6-22ccee78f5f5/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/963f0b0a-47d1-479c-9077-6c59023108fe/invoke \"HTTP/1.1 200 OK\"\n", - 
"2025-08-11 19:32:05,820 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:32:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:47<20:06, 27.6 steps/min]2025-08-11 19:32:06,520 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:32:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:48<20:06, 27.6 steps/min]2025-08-11 19:32:07,704 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:32:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:08,380 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:32:08 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:50<20:06, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51954fb4-34ed-4511-b2fd-a6169b5ea5d3/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:10,389 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:32:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/775a5b67-2406-42b8-86e5-243e01b8dc27/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:53<20:06, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:32:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider 
= huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:12,779 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:32:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:32:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:54<20:06, 27.6 steps/min]2025-08-11 19:32:13,458 - agent.ComputerAgent - INFO - Computer: click({'x': 461, 'y': 169})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 461, 'y': 169})\n", - "\u001b[92m19:32:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:14,530 - agent.ComputerAgent - INFO - Computer: click({'x': 125, 'y': 182})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 125, 'y': 182})\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6785/7340 [245:56<20:07, 27.6 steps/min]2025-08-11 19:32:15,198 - agent.ComputerAgent - INFO - LLM processing started 
with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:32:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:16,538 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'win'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'win'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6787/7340 [245:58<20:02, 27.6 steps/min]\u001b[92m19:32:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:19,209 - agent.ComputerAgent - INFO - Computer: type({'text': 'Dublin'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Dublin'})\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6788/7340 
[246:00<20:00, 27.6 steps/min]\u001b[92m19:32:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:19,877 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:32:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:20,569 - agent.ComputerAgent - INFO - Computer: click({'x': 90, 'y': 183})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 90, 'y': 183})\n", - "\u001b[92m19:32:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:21,243 - agent.ComputerAgent - INFO - Computer: click({'x': 430, 'y': 219})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 430, 'y': 219})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ----| 6789/7340 [246:04<19:58, 27.6 steps/min]\u001b[92m19:32:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:32:22 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:32:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:23,644 - agent.ComputerAgent - INFO - Computer: click({'x': 188, 'y': 190})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 188, 'y': 190})\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6791/7340 [246:05<19:53, 27.6 steps/min]\u001b[92m19:32:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:24,280 - agent.ComputerAgent - INFO - Computer: click({'x': 123, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 123, 'y': 178})\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6793/7340 [246:07<19:49, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 
93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6793/7340 [246:08<19:49, 27.6 steps/min]\u001b[92m19:32:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:27,608 - agent.ComputerAgent - INFO - Computer: click({'x': 18, 'y': 476})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 18, 'y': 476})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b928bd01-f1b7-4f34-accf-acb6aec5d8cd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6793/7340 [246:09<19:49, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:28,295 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:32:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7633715b-dde0-4c56-a4b6-22ccee78f5f5/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:28,953 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:32:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6794/7340 [246:10<19:47, 27.6 steps/min]2025-08-11 19:32:29,623 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:32:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/963f0b0a-47d1-479c-9077-6c59023108fe/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f209659e-2ed9-40fa-ae61-5359fb9ef290/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6794/7340 [246:12<19:47, 27.6 steps/min]Setting 
`pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:30,976 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:32:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:31,628 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:32:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:32:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6794/7340 [246:13<19:47, 27.6 steps/min]2025-08-11 19:32:32,293 - agent.ComputerAgent - INFO - Computer: click({'x': 534, 'y': 554})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 534, 'y': 554})\n", - "2025-08-11 19:32:32,937 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:32:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6794/7340 [246:14<19:47, 27.6 steps/min]2025-08-11 19:32:33,607 - 
agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:32:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:34,249 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:32:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6795/7340 [246:17<19:45, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:36,916 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:32:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6795/7340 [246:18<19:45, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: 
POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6795/7340 [246:19<19:45, 27.6 steps/min]2025-08-11 19:32:38,805 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:32:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:39,495 - agent.ComputerAgent - INFO - Computer: click({'x': 173, 'y': 149})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 173, 'y': 149})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6795/7340 [246:21<19:45, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:40,803 - agent.ComputerAgent - INFO - Computer: type({'text': 'wc -l System_Resources_Report.txt\\n'})\n", - 
"INFO:agent.ComputerAgent:Computer: type({'text': 'wc -l System_Resources_Report.txt\\n'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6796/7340 [246:23<19:43, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:42,099 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:32:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6797/7340 [246:24<19:41, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:32:43 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:44,597 - agent.ComputerAgent - INFO - Computer: double_click({'x': 482, 'y': 277})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 482, 'y': 277})\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6797/7340 [246:26<19:41, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:32:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:46,644 - agent.ComputerAgent - INFO - Computer: type({'text': '=SUM(Sheet1.B2:B11)'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '=SUM(Sheet1.B2:B11)'})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:47,299 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:32:47,300 - agent.ComputerAgent - INFO - Computer: click({'x': 578, 
'y': 286})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 578, 'y': 286})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6798/7340 [246:29<19:39, 27.6 steps/min]\u001b[92m19:32:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:32:48,654 - agent.ComputerAgent - INFO - Computer: click({'x': 316, 'y': 416})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 316, 'y': 416})\n", - "\u001b[92m19:32:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/775a5b67-2406-42b8-86e5-243e01b8dc27/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:49,313 - agent.ComputerAgent - INFO - Computer: click({'x': 306, 'y': 416})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 306, 'y': 416})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:50 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:32:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6800/7340 [246:31<19:34, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:50,681 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 254})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 254})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:32:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6802/7340 [246:33<19:30, 27.6 
steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:52,640 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:32:52,640 - agent.ComputerAgent - INFO - Computer: click({'x': 14, 'y': 524})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 14, 'y': 524})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:54,035 - agent.ComputerAgent - INFO - Computer: type({'text': '=A2/1000000'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '=A2/1000000'})\n", - "\u001b[92m19:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:32:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6803/7340 [246:35<19:27, 27.6 steps/min]2025-08-11 19:32:54,689 - agent.ComputerAgent - INFO - Computer: click({'x': 19, 'y': 481})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 19, 'y': 481})\n", - "2025-08-11 19:32:55,406 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 193})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 193})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:32:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 
- \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6805/7340 [246:37<19:23, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:32:56,702 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:32:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:57,376 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:32:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:32:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/963f0b0a-47d1-479c-9077-6c59023108fe/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6807/7340 [246:39<19:18, 27.6 steps/min]2025-08-11 19:32:58,080 - agent.ComputerAgent - INFO - 
Computer: click({'x': 343, 'y': 183})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 343, 'y': 183})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b928bd01-f1b7-4f34-accf-acb6aec5d8cd/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:32:58,735 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:32:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:32:59,441 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:32:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6807/7340 [246:41<19:18, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:33:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:33:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/7633715b-dde0-4c56-a4b6-22ccee78f5f5/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6808/7340 [246:42<19:16, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:33:01,450 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:33:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:33:02,154 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:33:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:33:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 
93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6808/7340 [246:43<19:16, 27.6 steps/min]2025-08-11 19:33:02,855 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:33:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:33:03,530 - agent.ComputerAgent - INFO - Computer: click({'x': 633, 'y': 473})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 633, 'y': 473})\n", - "\u001b[92m19:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51954fb4-34ed-4511-b2fd-a6169b5ea5d3/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6808/7340 [246:45<19:16, 27.6 steps/min]\u001b[92m19:33:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:33:04,220 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:33:04 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/f209659e-2ed9-40fa-ae61-5359fb9ef290/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:04,896 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:33:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:33:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6809/7340 [246:46<19:14, 27.6 steps/min]2025-08-11 19:33:05,586 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 424, 'y': 418}, {'x': 527, 'y': 226}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 424, 'y': 418}, {'x': 527, 'y': 226}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:33:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 
6809/7340 [246:48<19:14, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:33:06,917 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:33:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:33:07,597 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:33:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:33:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6810/7340 [246:49<19:12, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:33:08,272 - agent.ComputerAgent - INFO - Computer: click({'x': 946, 'y': 750})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 946, 'y': 750})\n", - "2025-08-11 19:33:08,957 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:33:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6811/7340 [246:51<19:10, 
27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51954fb4-34ed-4511-b2fd-a6169b5ea5d3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6812/7340 [246:52<19:08, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51954fb4-34ed-4511-b2fd-a6169b5ea5d3/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:13,491 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6812/7340 [246:55<19:08, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:14,808 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:33:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b928bd01-f1b7-4f34-accf-acb6aec5d8cd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6813/7340 [246:57<19:06, 27.6 steps/min]2025-08-11 19:33:16,837 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:33:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:33:17,526 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:33:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6814/7340 [246:59<19:03, 27.6 steps/min]2025-08-11 19:33:18,187 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:33:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6814/7340 [247:00<19:04, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:20,027 - agent.ComputerAgent - INFO - Computer: click({'x': 89, 'y': 185})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 89, 'y': 185})\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6814/7340 [247:01<19:04, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:21,352 - agent.ComputerAgent - INFO - Computer: type({'text': 'find / -type f -name \"secret.docx\" 2>/dev/null'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'find / -type f -name \"secret.docx\" 2>/dev/null'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:33:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6815/7340 [247:03<19:01, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7a2c2835-d21e-4e04-babb-e8305a4f1f9d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/963f0b0a-47d1-479c-9077-6c59023108fe/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:33:22,639 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:33:22 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:33:23,336 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:33:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6816/7340 [247:05<18:59, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00/dev/null | head -n1); printf \"%s\\\\n\" \"$path\"; echo -n \"$path\" | xclip -selection clipboard 2>/dev/null || echo -n \"$path\" | wl-copy 2>/dev/null || true'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'path=$(find / -type f -name \"secret.docx\" 2>/dev/null | head -n1); printf \"%s\\\\n\" \"$path\"; echo -n \"$path\" | xclip -selection clipboard 2>/dev/null || echo -n \"$path\" | wl-copy 2>/dev/null || true'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 2/4 [00:03<00:03, 1.76s/it]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:37:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 3/4 [00:05<00:01, 1.72s/it]2025-08-11 19:37:50,401 - agent.ComputerAgent - INFO - Agent: The video has been 
converted to an MP3 and saved on the Desktop as β€œBaby Justin Bieber.mp3.” You can now play it anytime.\n", - "\n", - "Task completed\n", - "INFO:agent.ComputerAgent:Agent: The video has been converted to an MP3 and saved on the Desktop as β€œBaby Justin Bieber.mp3.” You can now play it anytime.\n", - "\n", - "Task completed\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.45s/it]\n", - "2025-08-11 19:37:51,060 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 488\n", - " - prompt_tokens: 12887\n", - " - total_tokens: 13375\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0210\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 488\n", - " - prompt_tokens: 12887\n", - " - total_tokens: 13375\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 448\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0210\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:37:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:37:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6944/7340 [251:34<14:20, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:37:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:37:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:37:53,761 - agent.ComputerAgent - INFO - Computer: click({'x': 515, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 515, 'y': 232})\n", - "\u001b[92m19:37:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6945/7340 [251:35<14:18, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:37:54,423 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 989, 'y': 366})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 989, 'y': 366})\n", - "\u001b[92m19:37:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:37:55,120 - agent.ComputerAgent - INFO - Computer: double_click({'x': 94, 'y': 453})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 94, 'y': 453})\n", - "\u001b[92m19:37:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6946/7340 [251:36<14:16, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:37:55,799 - agent.ComputerAgent - INFO - Computer: click({'x': 153, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 153, 'y': 52})\n", - "\u001b[92m19:37:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:37:56,468 - agent.ComputerAgent - INFO - Computer: double_click({'x': 354, 'y': 136})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 354, 'y': 136})\n", - "\u001b[92m19:37:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for 
open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:37:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6948/7340 [251:38<14:11, 27.6 steps/min]2025-08-11 19:37:57,837 - agent.ComputerAgent - INFO - Computer: click({'x': 471, 'y': 205})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 471, 'y': 205})\n", - "\u001b[92m19:37:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:37:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:37:58,540 - agent.ComputerAgent - INFO - Computer: click({'x': 945, 'y': 500})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 945, 'y': 500})\n", - "2025-08-11 19:37:59,244 - agent.ComputerAgent - INFO - Computer: double_click({'x': 989, 'y': 713})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 989, 'y': 713})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6950/7340 [251:40<14:07, 27.6 steps/min]\u001b[92m19:37:59 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:37:59,918 - agent.ComputerAgent - INFO - Computer: click({'x': 913, 'y': 184})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 913, 'y': 184})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:00,585 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:38:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6953/7340 [251:42<14:00, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6954/7340 [251:43<13:58, 27.6 steps/min]2025-08-11 19:38:02,285 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:38:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6954/7340 [251:44<13:58, 27.6 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6954/7340 [251:45<13:58, 27.6 steps/min]2025-08-11 19:38:04,439 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:38:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6954/7340 [251:46<13:58, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:05,613 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:38:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:07,021 - agent.ComputerAgent - INFO - Agent: I configured your system to lock automatically when you’re away.\n", - "\n", - "What I set\n", - "- Enabled Automatic Screen Lock.\n", - "- Set 
Blank Screen Delay to 5 minutes, so after 5 minutes of inactivity the screen turns off and locks immediately.\n", - "- Kept β€œLock screen on Suspend” enabled, so any suspend will also require a password on wake.\n", - "\n", - "How to adjust later\n", - "- Settings > Privacy > Screen:\n", - " - Change β€œBlank Screen Delay” to your preferred timeout (e.g., 1–2 minutes for tighter security).\n", - " - β€œAutomatic Screen Lock Delay” is set to lock as soon as the screen turns off.\n", - "\n", - "Tip\n", - "- Press Super+L anytime to lock instantly.\n", - "\n", - "Task completed.\n", - "INFO:agent.ComputerAgent:Agent: I configured your system to lock automatically when you’re away.\n", - "\n", - "What I set\n", - "- Enabled Automatic Screen Lock.\n", - "- Set Blank Screen Delay to 5 minutes, so after 5 minutes of inactivity the screen turns off and locks immediately.\n", - "- Kept β€œLock screen on Suspend” enabled, so any suspend will also require a password on wake.\n", - "\n", - "How to adjust later\n", - "- Settings > Privacy > Screen:\n", - " - Change β€œBlank Screen Delay” to your preferred timeout (e.g., 1–2 minutes for tighter security).\n", - " - β€œAutomatic Screen Lock Delay” is set to lock as soon as the screen turns off.\n", - "\n", - "Tip\n", - "- Press Super+L anytime to lock instantly.\n", - "\n", - "Task completed.\n", - "2025-08-11 19:38:07,641 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 790\n", - " - prompt_tokens: 10768\n", - " - total_tokens: 11558\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 640\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0214\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 790\n", - " - prompt_tokens: 10768\n", - " - total_tokens: 11558\n", - " - completion_tokens_details:\n", - " - 
accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 640\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 0\n", - " - response_cost: $0.0214\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6955/7340 [251:49<13:56, 27.6 steps/min]2025-08-11 19:38:08,315 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:38:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:38:09,672 - agent.ComputerAgent - INFO - 
LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:38:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6955/7340 [251:52<13:56, 27.6 steps/min]\u001b[92m19:38:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:38:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:38:11,032 - agent.ComputerAgent - INFO - Computer: click({'x': 375, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 375, 'y': 75})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:38:11,731 - agent.ComputerAgent - INFO - Computer: click({'x': 210, 'y': 202})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 210, 'y': 
202})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6958/7340 [251:53<13:49, 27.6 steps/min]2025-08-11 19:38:12,415 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:38:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7633715b-dde0-4c56-a4b6-22ccee78f5f5/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:13,115 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:38:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6960/7340 [251:54<13:45, 27.6 steps/min]2025-08-11 19:38:13,805 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:38:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/af58ffed-65a3-4c4a-a9fe-5c940230627d/close \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:14,486 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:38:14 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:38:15,845 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:38:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6960/7340 [251:57<13:45, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:17,724 - agent.ComputerAgent - INFO - Computer: type({'text': 'Vienna'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Vienna'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7633715b-dde0-4c56-a4b6-22ccee78f5f5/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:19,084 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ENTER'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ENTER'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6960/7340 
[252:00<13:45, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:20,825 - agent.ComputerAgent - INFO - Computer: click({'x': 342, 'y': 184})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 342, 'y': 184})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6969/7340 [252:02<13:25, 27.7 steps/min]2025-08-11 19:38:21,487 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:38:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:38:22,165 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:38:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ---| 6970/7340 [252:05<13:22, 27.6 
steps/min]\u001b[92m19:38:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00/dev/null || echo 'Requesting sudo password if needed'\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"sudo -n true 2>/dev/null || echo 'Requesting sudo password if needed'\"})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6973/7340 [252:29<13:17, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:38:49,132 - agent.ComputerAgent - INFO - Computer: click({'x': 178, 'y': 305})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 178, 'y': 305})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6974/7340 [252:30<13:15, 27.6 steps/min]\u001b[92m19:38:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:38:49,800 - agent.ComputerAgent - INFO - Computer: click({'x': 932, 'y': 296})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 932, 'y': 296})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6975/7340 [252:32<13:12, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6976/7340 [252:33<13:10, 27.6 steps/min]\u001b[92m19:38:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:38:52,154 - agent.ComputerAgent - INFO - Computer: click({'x': 88, 'y': 313})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 88, 'y': 313})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:38:52 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/fa081188-4859-4858-9d33-0f9675111182/reset \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6976/7340 [252:35<13:10, 27.6 steps/min]\u001b[92m19:38:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:38:54,176 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:38:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:38:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:38:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6977/7340 [252:36<13:08, 27.6 steps/min]2025-08-11 19:38:55,531 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:38:55,532 - agent.ComputerAgent - INFO - Computer: double_click({'x': 379, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 379, 'y': 105})\n", - "2025-08-11 19:38:56,196 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:38:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6977/7340 [252:37<13:08, 27.6 
steps/min]2025-08-11 19:38:56,856 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:38:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:38:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:38:57,552 - agent.ComputerAgent - INFO - Computer: click({'x': 351, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 351, 'y': 75})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6978/7340 [252:39<13:06, 27.6 steps/min]2025-08-11 19:38:58,981 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:38:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6979/7340 [252:40<13:04, 27.6 steps/min]2025-08-11 19:38:59,669 - agent.ComputerAgent - INFO - LLM processing started 
with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:38:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:38:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:38:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6979/7340 [252:42<13:04, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:02,357 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:39:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "2025-08-11 19:39:03,722 - agent.ComputerAgent - INFO - Computer: wait({})\n", - "INFO:agent.ComputerAgent:Computer: wait({})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6979/7340 [252:45<13:04, 27.6 steps/min]2025-08-11 19:39:04,403 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 993, 'y': 732})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 993, 'y': 732})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:05,068 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:39:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:39:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6980/7340 [252:46<13:02, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:39:06,128 - agent.ComputerAgent - INFO - Computer: click({'x': 316, 'y': 183})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 316, 'y': 183})\n", - "2025-08-11 19:39:06,806 - agent.ComputerAgent - INFO - LLM 
processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:39:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:08,174 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ENTER'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ENTER'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6981/7340 [252:50<13:00, 27.6 steps/min]\u001b[92m19:39:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:10,148 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:39:10,149 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'meta'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 
'meta'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:39:11,496 - agent.ComputerAgent - INFO - Computer: click({'x': 569, 'y': 372})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 569, 'y': 372})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:12,817 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6983/7340 [252:54<12:55, 27.6 steps/min]2025-08-11 19:39:13,521 - agent.ComputerAgent - INFO - Computer: click({'x': 81, 'y': 148})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 81, 'y': 148})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:39:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:14,827 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 210, 'y': 177}, {'x': 210, 'y': 457}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 210, 'y': 177}, {'x': 210, 'y': 457}]})\n", - "2025-08-11 19:39:15,498 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:39:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:39:16,177 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6985/7340 [252:57<12:51, 27.6 steps/min]\u001b[92m19:39:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:39:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", 
- "\u001b[92m19:39:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6987/7340 [252:58<12:46, 27.6 steps/min]\u001b[92m19:39:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:17,905 - agent.ComputerAgent - INFO - Computer: click({'x': 461, 'y': 321})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 461, 'y': 321})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6988/7340 [253:01<12:44, 27.6 steps/min]\u001b[92m19:39:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:20,770 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 430})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 430})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6988/7340 [253:02<12:44, 27.6 steps/min]2025-08-11 19:39:21,798 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:39:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/c915dbd9-32bc-40a7-9c07-d437c737419f/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6989/7340 [253:03<12:42, 27.6 steps/min]2025-08-11 19:39:22,498 - agent.ComputerAgent - INFO - LLM processing 
started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:39:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:23,147 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:39:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:39:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6989/7340 [253:04<12:42, 27.6 steps/min]2025-08-11 19:39:23,865 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 209, 'y': 146}, {'x': 281, 'y': 396}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 209, 'y': 146}, {'x': 281, 'y': 396}]})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:39:24,516 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:39:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/invoke \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6989/7340 [253:06<12:42, 27.6 steps/min]2025-08-11 19:39:25,167 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:39:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:39:27,161 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'esc'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'esc'})\n", - "2025-08-11 19:39:27,846 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:39:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:39:28,870 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 
messages\n", - "\u001b[92m19:39:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/1a178f89-87e5-46d9-a114-22d5fcc5c630/close \"HTTP/1.1 200 OK\"\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6990/7340 [253:10<12:40, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:39:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:39:30,222 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+a'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+a'})\n", - "2025-08-11 19:39:30,916 - agent.ComputerAgent - INFO - Computer: click({'x': 91, 'y': 314})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 91, 'y': 314})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6991/7340 [253:13<12:38, 27.6 
steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:39:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:39:33,548 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:39:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:39:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6992/7340 [253:15<12:36, 27.6 steps/min]2025-08-11 19:39:34,246 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:39:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:39:34,914 - agent.ComputerAgent - INFO - Computer: click({'x': 341, 'y': 305})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 341, 'y': 305})\n", - "2025-08-11 19:39:35,577 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - 
"INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:39:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 6992/7340 [253:17<12:36, 27.6 steps/min]2025-08-11 19:39:36,257 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:39:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v1/gyms/OSWorld-Ubuntu \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00/dev/null 2>&1; then\\n echo \\'User charles already exists\\'\\nelse\\n sudo useradd -M -s /usr/sbin/nologin -d / charles\\nfi\\n\\necho \\'charles:Ex@mpleP@55w0rd!\\' | sudo chpasswd\\n\\n# Give the user write access inside the jail via a subdirectory\\nsudo mkdir -p /home/test1/uploads\\nsudo chown charles:charles /home/test1/uploads\\n\\n# Configure OpenSSH to chroot the user and force SFTP only\\nsudo bash -lc \\'cat >>/etc/ssh/sshd_config <<\"EOF\"\\n\\nMatch User charles\\n ChrootDirectory /home/test1\\n ForceCommand internal-sftp\\n AllowTCPForwarding no\\n X11Forwarding no\\nEOF\\'\\n\\n# Restart SSH to apply changes\\nsudo systemctl restart ssh || sudo systemctl restart sshd\\n\\necho \\'Setup complete: user charles restricted to /home/test1 via SFTP.\\''})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'set -e\\nsudo mkdir -p /home/test1/uploads\\nsudo chown root:root /home/test1\\nsudo chmod 755 /home/test1\\n\\n# Create the user for SFTP-only access chrooted to 
/home/test1\\nif id -u charles >/dev/null 2>&1; then\\n echo \\'User charles already exists\\'\\nelse\\n sudo useradd -M -s /usr/sbin/nologin -d / charles\\nfi\\n\\necho \\'charles:Ex@mpleP@55w0rd!\\' | sudo chpasswd\\n\\n# Give the user write access inside the jail via a subdirectory\\nsudo mkdir -p /home/test1/uploads\\nsudo chown charles:charles /home/test1/uploads\\n\\n# Configure OpenSSH to chroot the user and force SFTP only\\nsudo bash -lc \\'cat >>/etc/ssh/sshd_config <<\"EOF\"\\n\\nMatch User charles\\n ChrootDirectory /home/test1\\n ForceCommand internal-sftp\\n AllowTCPForwarding no\\n X11Forwarding no\\nEOF\\'\\n\\n# Restart SSH to apply changes\\nsudo systemctl restart ssh || sudo systemctl restart sshd\\n\\necho \\'Setup complete: user charles restricted to /home/test1 via SFTP.\\''})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:05<00:00, 1.34s/it]\n", - "\u001b[92m19:41:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7029/7340 [255:01<11:17, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7030/7340 [255:03<11:14, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 
for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:41:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:41:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:22,776 - agent.ComputerAgent - INFO - Computer: click({'x': 187, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 187, 'y': 52})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7030/7340 [255:04<11:14, 27.6 steps/min]\u001b[92m19:41:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:23,473 - agent.ComputerAgent - INFO - Computer: click({'x': 540, 'y': 471})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 540, 'y': 471})\n", - "\u001b[92m19:41:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:24,156 - agent.ComputerAgent - INFO - Computer: click({'x': 865, 'y': 201})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 865, 'y': 201})\n", - "\u001b[92m19:41:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7031/7340 [255:06<11:12, 27.6 steps/min]\u001b[92m19:41:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:25,511 - agent.ComputerAgent - INFO - Computer: click({'x': 91, 'y': 314, 'button': 'left'})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 91, 'y': 314, 'button': 'left'})\n", - "2025-08-11 19:41:26,190 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 990, 'y': 709})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 640, 'scroll_x': 0, 'x': 990, 'y': 709})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:26,861 - agent.ComputerAgent - INFO - Computer: click({'x': 13, 'y': 673})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 13, 'y': 673})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:41:28,168 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'F11'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'F11'})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7033/7340 [255:09<11:08, 27.6 steps/min]2025-08-11 19:41:28,840 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:41:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:29,543 - agent.ComputerAgent - INFO - Computer: click({'x': 461, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 461, 'y': 101})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7037/7340 [255:11<10:59, 
27.6 steps/min]\u001b[92m19:41:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:30,890 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:41:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:41:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:31,564 - agent.ComputerAgent - INFO - Computer: click({'x': 510, 'y': 283})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 510, 'y': 283})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7039/7340 [255:16<10:54, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:41:35,309 - agent.ComputerAgent 
- INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:41:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:41:36,009 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:41:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7039/7340 [255:17<10:55, 27.6 steps/min]2025-08-11 19:41:36,709 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:41:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:41:37,392 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:41:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:41:38,081 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:41:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7039/7340 [255:21<10:55, 27.6 steps/min]\u001b[92m19:41:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:41:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:41:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:41,479 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:41:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:41:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:42,130 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:41:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "\u001b[92m19:41:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7039/7340 [255:23<10:55, 27.6 steps/min]\u001b[92m19:41:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:42,794 - agent.ComputerAgent - INFO - Computer: click({'x': 996, 'y': 732})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 996, 'y': 732})\n", - "2025-08-11 19:41:43,448 - agent.ComputerAgent - INFO - Computer: click({'x': 223, 'y': 35})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 223, 'y': 35})\n", - "\u001b[92m19:41:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:41:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7039/7340 [255:25<10:55, 27.6 steps/min]2025-08-11 19:41:44,120 - agent.ComputerAgent - INFO - Computer: click({'x': 343, 'y': 195})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 343, 'y': 195})\n", - "2025-08-11 19:41:44,810 - agent.ComputerAgent - 
INFO - Computer: drag({'path': [{'x': 209, 'y': 146}, {'x': 407, 'y': 399}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 209, 'y': 146}, {'x': 407, 'y': 399}]})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7041/7340 [255:26<10:50, 27.6 steps/min]2025-08-11 19:41:45,450 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:41:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:41:46,139 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:41:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7043/7340 [255:30<10:46, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:41:50,003 - agent.ComputerAgent - INFO - Computer: type({'text': 'chrome refresh 2023'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'chrome refresh 2023'})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7043/7340 [255:31<10:46, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:41:51,944 - agent.ComputerAgent - INFO - Computer: type({'text': 'Yann LeCun Google Scholar'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Yann LeCun Google Scholar'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7044/7340 [255:33<10:44, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:52,582 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:41:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "\u001b[92m19:41:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7045/7340 [255:35<10:42, 27.6 steps/min]\u001b[92m19:41:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:54,575 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:41:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:41:55,282 - agent.ComputerAgent - INFO - Computer: click({'x': 90, 'y': 314, 'button': 'left'})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 90, 'y': 314, 'button': 'left'})\n", - "\u001b[92m19:41:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/reset \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 
7045/7340 [255:37<10:42, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:41:55,970 - agent.ComputerAgent - INFO - Computer: double_click({'x': 12, 'y': 524})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 12, 'y': 524})\n", - "\u001b[92m19:41:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:56,628 - agent.ComputerAgent - INFO - Computer: click({'x': 164, 'y': 427})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 164, 'y': 427})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7046/7340 [255:38<10:40, 27.6 steps/min]2025-08-11 19:41:57,310 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:41:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:41:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7048/7340 [255:40<10:35, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:41:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:41:59,486 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 590, 'scroll_x': 0, 'x': 991, 'y': 420})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 590, 'scroll_x': 0, 'x': 991, 'y': 420})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7048/7340 [255:41<10:35, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/invoke \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7049/7340 [255:42<10:33, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:42:02,043 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:42:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0180c5d2-a012-4261-b093-ed34f443f269/close 
\"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7049/7340 [255:43<10:33, 27.6 steps/min]2025-08-11 19:42:03,400 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:42:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:42:04,040 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:42:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:42:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:42:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP 
Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:42:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7049/7340 [255:47<10:33, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:42:06,703 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:42:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00Background Cover'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '>Background Cover'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/7f112db6-0b60-4e6c-86f5-0d87dc91f371/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/create_environment \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7072/7340 [256:50<9:43, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:43:08,956 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 messages\n", - "\u001b[92m19:43:08 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:43:10,282 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ENTER'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ENTER'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:12,251 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ENTER'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ENTER'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:43:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7074/7340 [256:54<9:39, 27.5 steps/min]2025-08-11 19:43:13,540 - agent.ComputerAgent - INFO - LLM processing started with 1 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 1 
messages\n", - "\u001b[92m19:43:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:43:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:43:14,917 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "2025-08-11 19:43:15,565 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:43:15,566 - agent.ComputerAgent - INFO - Computer: click({'x': 75, 'y': 166})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 75, 'y': 166})\n", - "\u001b[92m19:43:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7076/7340 [256:57<9:35, 27.5 steps/min]2025-08-11 19:43:16,239 - agent.ComputerAgent - INFO - Computer: click({'x': 985, 'y': 759})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 985, 'y': 759})\n", - "2025-08-11 19:43:16,881 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:43:16 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7078/7340 [256:58<9:30, 27.5 steps/min]2025-08-11 19:43:17,557 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:43:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/765aa707-cf44-4dd0-8933-2c2b94870afd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:18,888 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:43:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7079/7340 [257:00<9:28, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:43:20,265 - 
agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl++'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl++'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7079/7340 [257:02<9:28, 27.5 steps/min]\u001b[92m19:43:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:21,621 - agent.ComputerAgent - INFO - Computer: scroll({'scroll_y': 588, 'scroll_x': 0, 'x': 991, 'y': 433})\n", - "INFO:agent.ComputerAgent:Computer: scroll({'scroll_y': 588, 'scroll_x': 0, 'x': 991, 'y': 433})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/51c56274-d8ae-4edf-8ff1-b637cd2fff66/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:23,319 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:43:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:43:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7079/7340 [257:05<9:28, 27.5 steps/min]\u001b[92m19:43:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:43:24,668 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 32 messages\n", - "\u001b[92m19:43:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:43:25,366 - agent.ComputerAgent - INFO - Computer: click({'x': 341, 'y': 75})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 341, 'y': 75})\n", - "\u001b[92m19:43:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:43:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:26,376 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:43:26,377 - agent.ComputerAgent - INFO - Computer: click({'x': 48, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 48, 'y': 52})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:43:27,706 - agent.ComputerAgent - INFO - Computer: click({'x': 213, 'y': 183})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 213, 'y': 183})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7080/7340 [257:09<9:26, 27.5 steps/min]2025-08-11 19:43:28,338 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:43:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:43:29,016 - agent.ComputerAgent - INFO - Computer: click({'x': 666, 'y': 279})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 666, 'y': 279})\n", - " 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7083/7340 [257:10<9:19, 27.5 steps/min]2025-08-11 19:43:29,687 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:43:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:43:30,358 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:43:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7084/7340 [257:12<9:17, 27.5 steps/min]2025-08-11 19:43:31,038 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:43:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions 
\"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v1/environments/765aa707-cf44-4dd0-8933-2c2b94870afd/reset \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7084/7340 [257:13<9:17, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:43:32,348 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:43:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:43:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:33,008 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:43:33,009 - agent.ComputerAgent - INFO - Computer: click({'x': 79, 'y': 157})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 79, 'y': 157})\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7084/7340 [257:14<9:17, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:34 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7085/7340 [257:15<9:15, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:43:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:35,405 - agent.ComputerAgent - INFO - Computer: click({'x': 679, 'y': 563})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 679, 'y': 563})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:43:36,736 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl++'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl++'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:37 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", 
- "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/0c64a3b4-e9b0-46c1-a580-cdcf62b74e44/close \"HTTP/1.1 200 OK\"\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7086/7340 [257:19<9:13, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/765aa707-cf44-4dd0-8933-2c2b94870afd/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:43:38,000 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:43:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7087/7340 [257:20<9:11, 27.5 
steps/min]2025-08-11 19:43:39,348 - agent.ComputerAgent - INFO - Computer: click({'button': 'left', 'x': 308, 'y': 153})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'left', 'x': 308, 'y': 153})\n", - "2025-08-11 19:43:40,018 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:43:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:43:40,687 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:43:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:43:41,369 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:43:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7087/7340 [257:23<9:11, 27.5 steps/min]\u001b[92m19:43:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:43:42 
- LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:43:43,783 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:43:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00>/etc/ssh/sshd_config <<\"EOF\"\\n\\nMatch User charles\\n ChrootDirectory /home/test1\\n ForceCommand internal-sftp\\n AllowTCPForwarding no\\n X11Forwarding no\\nEOF\\'\\n\\n# Apply changes\\nsudo systemctl restart ssh || sudo systemctl restart sshd\\n\\n# Show resulting setup\\nid charles\\nls -ld /home/test1 /home/test1/uploads'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'set -e\\nsudo mkdir -p /home/test1\\nsudo chown root:root /home/test1\\nsudo chmod 755 /home/test1\\n\\n# Create user with no shell access\\nsudo useradd -M -s /usr/sbin/nologin -d / charles || true\\n\\necho \\'charles:Ex@mpleP@55w0rd!\\' | sudo chpasswd\\n\\n# Writable directory inside the jail\\nsudo mkdir -p /home/test1/uploads\\nsudo chown charles:charles /home/test1/uploads\\n\\n# Configure SSH to restrict user to SFTP chroot at /home/test1\\nsudo bash -lc \\'cat >>/etc/ssh/sshd_config <<\"EOF\"\\n\\nMatch User charles\\n ChrootDirectory /home/test1\\n ForceCommand internal-sftp\\n AllowTCPForwarding no\\n X11Forwarding no\\nEOF\\'\\n\\n# Apply changes\\nsudo systemctl restart ssh || sudo systemctl restart sshd\\n\\n# Show resulting setup\\nid charles\\nls -ld /home/test1 /home/test1/uploads'})\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 
7110/7340 [257:49<8:20, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:44:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - " 97%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ--| 7112/7340 [257:50<8:15, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/acf3037a-4b6c-4ea8-b81c-ffc2e76132e1/invoke \"HTTP/1.1 200 OK\"\n", - "Loading checkpoint shards: 0%| | 0/4 [00:00\n", + "\n", + "\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "api.anthropic.com | 520: Web server is returning an unknown error\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "
\n", + "
\n", + "

\n", + " Web server is returning an unknown error\n", + " Error code 520\n", + "

\n", + "
\n", + " Visit cloudflare.com for more information.\n", + "
\n", + "
2025-08-28 02:25:27 UTC
\n", + "
\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " You\n", + "

\n", + " \n", + " Browser\n", + " \n", + "

\n", + " Working\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Boston\n", + "

\n", + " \n", + " Cloudflare\n", + " \n", + "

\n", + " Working\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " api.anthropic.com\n", + "

\n", + " \n", + " Host\n", + " \n", + "

\n", + " Error\n", + "
\n", + "\n", + "
\n", + "
\n", + "
\n", + "\n", + "
\n", + "
\n", + "
\n", + "

What happened?

\n", + "

There is an unknown connection issue between Cloudflare and the origin web server. As a result, the web page can not be displayed.

\n", + "
\n", + "
\n", + "

What can I do?

\n", + "

If you are a visitor of this website:

\n", + "

Please try again in a few minutes.

\n", + "\n", + "

If you are the owner of this website:

\n", + "

There is an issue between Cloudflare's cache and your origin web server. Cloudflare monitors for these errors and automatically investigates the cause. To help support the investigation, you can pull the corresponding error log from your web server and submit it our support team. Please include the Ray ID (which is at the bottom of this error page). Additional troubleshooting resources.

\n", + "
\n", + "
\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_ea5aaf6f523182f18e0f091a9fdd0448 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_8af625c3a80f744a82b53de2fa170ec4 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_54dcc566cc9808ad3717a436b5b05eb8 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 232, 'y': 132}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 232, 'y': 132}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 232, 'y': 132}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 232, 'y': 132}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 232, 'y': 132}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured 
content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_dab85c3699d032d81cd81a4701c0d9a0 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_ef74c41fa05a294694e54bacca43b8e7 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 13 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", 
+ "action.ActionScreenshot.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y...500, 'x': 642, 'y': 206}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 458, 'y': 473}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 458, 'y': 473}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + 
"action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 458, 'y': 473}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 458, 'y': 473}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 458, 'y': 473}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 908, 'y': 539}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e0b87a8685c4005244e3d06efbba8220 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_3abda4050d3fabf282d5a26545d7a5d0 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_12ce02ed5d7b007cbd81f458ff55b7c3 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_8ca73eef0081bfb8e848b7cb8ab91d1f in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 968, 'y': 172}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_40de5c91f10e693a9581d5bbf6fcff8b in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 20 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For 
further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 100, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.APIConnectionError: AnthropicException - Cloudflare encountered an error processing this request: Bad Gateway\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e6e416cf9120e55f85f61080c8de0c8b in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', 
input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 353, 'y': 75}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', 
input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 317, 'y': 466}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 317, 'y': 466}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For 
further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 317, 'y': 466}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 317, 'y': 466}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 317, 'y': 466}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_c648a052ea88511e618f86970f55b03a in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e99451b6d0417ed04f30f4b2081e4dff in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_daefffa7a65d81622f83b3c6668e3dc0 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'click': 'left', 'button...left', 'x': 82, 'y': 34}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Step failed: 20 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + 
"action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 5, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', 
input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 22 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required 
[type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'keys': ['cmd', 'l'], 'keypress': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_edb099832d6d97a269f445a50809c93b in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_1b3cd9135846af8b540e23eb46e2fbb4 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_91af97c24e9aa860be14e9eab94d2fde in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_f2b2983c5c830a7bd102653fd0a028a6 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 22 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + 
"action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information 
visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'keypress'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='keypress', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 392, 'y': 324}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' 
[type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_07880876182adfd1c9fb8af49840fbe5 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_171c97f391507972db1c8b55b0026d80 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_fa514903703f38c748dcde9605a6dbe1 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e8ba77d8c521643a7b0789e416e80f98 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 20 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' 
[type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, 
input_value={'scroll_x': 0, 'scroll_y': 3, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 
'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', 
input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 22 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='keyrelease', 
input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", 
+ " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'keys': ['ctrl'], 'type': 'keyrelease'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='keyrelease', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_0b1bc25b68c7036363abf9252af6f4fe in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e37cd4c1d34010ee9ec8a5e8c5de4336 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 20 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-300, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. 
You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_30c460b92c1dd64f81cc1fd271bb3802 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_d66d2a19da572b66de44e59b2777fd40 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' 
[type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " 
For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'click', 'button': 'left'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_407ca9ac6cc60cf2528ab26d0ebb216a in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 650, 'y': 137}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 650, 'y': 137}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 650, 'y': 137}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 650, 'y': 137}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 650, 'y': 137}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_eebc630287ffdc99505d68ec452a99a5 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + 
"action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 458, 'y': 285}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool 
evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Field required 
[type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 24, 'y': 370}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + 
"action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 987, 'y': 170}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 614, 'y': 35}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 22 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " 
Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'type': 'exit', 'text': 'exit'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='exit', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_5c53aa95e64ab70c42813298eb1ec717 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. 
You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_7ff64c904c8f3851b67717462bfd152f in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_40eae6ea4e0389d5feb6b124f3d12bd1 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Unclosed client session\n", + "client_session: \n", + "Unclosed client session\n", + "client_session: \n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. 
You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_5274aa770c5dfdd785fda4df6da6b9de in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information 
visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 647, 'y': 268}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_ace80396f01199cf1f7b2959db30828f in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_d3fc36b25719a4310c4ef00ee9fb80e2 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 893, 'y': 220}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 529, 'y': 242}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 529, 'y': 242}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 529, 'y': 242}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 529, 'y': 242}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 529, 'y': 242}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.APIConnectionError: AnthropicException - Cloudflare encountered an error processing this request: Bad Gateway\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_69e1e17a074f2106e7d9f550d06f2129 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 542, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_e4cfa4d5e4d1c7a29abf1e99bdf13d4c in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 266, 'y': 65}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 266, 'y': 65}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input 
should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 266, 'y': 65}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 266, 'y': 65}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 266, 'y': 65}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_0de88e377d3e016333e75cbb721467d5 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_6a72d8198557e585b311fc2f574c06ae in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_565b6f653b140a9f9616812bca9bede4 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further 
information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", 
+ "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: AnthropicException - {\"type\":\"error\",\"error\":{\"type\":\"api_error\",\"message\":\"Internal server error\"},\"request_id\":\"req_011CSZXyZhhMMUkqx5Rn49nH\"}. Handle with `litellm.InternalServerError`.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_ca61663ebdf6a23abe76f8a623e9cba7 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_19b4c7cbfd607aceb64225a211bcb63c in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further 
information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", 
+ "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_f5a55c799f314865c754e9ef2085d6aa in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 566, 'y': 106}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_75d03c61c6f4b25a20022c405da92f3e in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.ServiceUnavailableError: AnthropicException - upstream connect error or disconnect/reset before headers. reset reason: connection termination. Handle with `litellm.ServiceUnavailableError`.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 449, 'y': 287}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 449, 'y': 287}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 449, 'y': 287}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 449, 'y': 287}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 449, 'y': 287}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_864eb3e72deacbac1273d63ec1300d63 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_6e9676b4bf46acb690f7f03e5000c774 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 318, 'y': 428}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further 
information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", 
+ "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 489, 'y': 37}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 489, 'y': 37}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input 
should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 489, 'y': 37}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 489, 'y': 37}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 489, 'y': 37}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output 
schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_aff343428a6d53531e2e7e53961a08a1 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 264, 'y': 458}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information 
visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_62d62a7d978a743da6e34251ab321b6b in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_91ef10c85551d3461eb078f9a9fab016 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " 
For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 307, 'y': 231}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_d34610618a89b212bd65cab09d37ffce in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e0724c94603820eeee26337000c7f280 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_d5e422c31b3ba5e42c1693877282b13c in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Got 502 from https://mcp.hud.so/v3/mcp, retrying in 1.0s (attempt 1/3)\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 263, 'y': 351}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 263, 'y': 351}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + 
"action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 263, 'y': 351}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 263, 'y': 351}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 263, 'y': 351}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: OpenAIException - \n", + "\n", + "\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "api.openai.com | 520: Web server is returning an unknown error\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "
\n", + "
\n", + "

\n", + " Web server is returning an unknown error\n", + " Error code 520\n", + "

\n", + "
\n", + " Visit cloudflare.com for more information.\n", + "
\n", + "
2025-08-28 06:05:09 UTC
\n", + "
\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " You\n", + "

\n", + " \n", + " Browser\n", + " \n", + "

\n", + " Working\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Boston\n", + "

\n", + " \n", + " Cloudflare\n", + " \n", + "

\n", + " Working\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " api.openai.com\n", + "

\n", + " \n", + " Host\n", + " \n", + "

\n", + " Error\n", + "
\n", + "\n", + "
\n", + "
\n", + "
\n", + "\n", + "
\n", + "
\n", + "
\n", + "

What happened?

\n", + "

There is an unknown connection issue between Cloudflare and the origin web server. As a result, the web page can not be displayed.

\n", + "
\n", + "
\n", + "

What can I do?

\n", + "

If you are a visitor of this website:

\n", + "

Please try again in a few minutes.

\n", + "\n", + "

If you are the owner of this website:

\n", + "

There is an issue between Cloudflare's cache and your origin web server. Cloudflare monitors for these errors and automatically investigates the cause. To help support the investigation, you can pull the corresponding error log from your web server and submit it our support team. Please include the Ray ID (which is at the bottom of this error page). Additional troubleshooting resources.

\n", + "
\n", + "
\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_731a2b625462c9d2d582d1053510b6d7 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_ff2d8a59b90db337014147e1d007bd48 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 20 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... 
-800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'scroll_x': 0, 'scroll_y... -800, 'type': 'scroll'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='scroll', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 710, 'y': 429}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 710, 'y': 429}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 710, 'y': 429}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 710, 'y': 429}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 710, 'y': 429}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_a0d2cc3868d537a5f9fbe2594ff3111b in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 
'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 624, 'y': 364}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 654, 'y': 136}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 654, 'y': 136}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 654, 'y': 136}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 654, 'y': 136}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 654, 'y': 136}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 510, 'y': 372}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further 
information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", 
+ "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 23 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.x\n", + " Field required 
[type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'action': 
''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.x\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'action': ''}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_7cede1574442edf20721264e92def0e5 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_01ffb88ba8ce9674981142a95e74d761 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_66612b5bdb381864a8d969606cef24ae in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_8e8f0499e19ab802c3bb846c1760b91f in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, 
input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionWait.type\n", + " Field required [type=missing, input_value={'click': 'left', 'x': 373, 'y': 390}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. 
You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_94e02af05e07b713a323768e11e50240 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_c1976117ae2105b15f03270251e60d7e in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 282, 'y': 274}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool execution failed: Tool call failed: (-32600, 'Invalid Request')\n", + "Evaluation phase failed: [MCPToolResult(meta=None, content=[TextContent(type='text', text=\"Tool call failed: (-32600, 'Invalid Request')\", annotations=None, meta=None)], structuredContent=None, isError=True)]\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_e75ae0b9d7688b63d8b19a90bc12c66f in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_213c31db0a325578b61cc609a035fe6c in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 
'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 474, 'y': 434}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured 
content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 217, 'y': 66}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 217, 'y': 66}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input 
should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 217, 'y': 66}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 217, 'y': 66}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ick', 'x': 217, 'y': 66}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. 
Please include the request ID req_b18b0c5c4b7fa1ecdf2a38138795e4db in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 129}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 129}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 129}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 129}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 129}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 424}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 424}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 
'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 424}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 424}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 462, 'y': 424}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further 
information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 271, 'y': 234}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 407, 'y': 397}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, 
input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 360, 'y': 200}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 15 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.button\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'type': 'triple_click', 'x': 881, 'y': 579}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 444, 'y': 472}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 444, 'y': 472}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 444, 'y': 472}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 444, 'y': 472}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 444, 'y': 472}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: 14 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.type\n", + " Input should be 'click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 536, 'y': 240}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 536, 'y': 240}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 
'left', 'type'...ck', 'x': 536, 'y': 240}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 536, 'y': 240}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type'...ck', 'x': 536, 'y': 240}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='triple_click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_99932258b791a5b87203ab08daabc63d in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: 21 validation errors for ResponseComputerToolCall\n", + "action.ActionClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.type\n", + " Input should be 'double_click' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionDoubleClick.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDoubleClick.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.path\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionDrag.type\n", + " Input should be 'drag' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit 
https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionKeypress.keys\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionKeypress.type\n", + " Input should be 'keypress' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.type\n", + " Input should be 'move' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionMove.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionMove.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScreenshot.type\n", + " Input should be 'screenshot' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionScroll.scroll_x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.scroll_y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.type\n", + " Input should be 'scroll' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + 
"action.ActionScroll.x\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionScroll.y\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.text\n", + " Field required [type=missing, input_value={'button': 'left', 'type': 'click'}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/missing\n", + "action.ActionType.type\n", + " Input should be 'type' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "action.ActionWait.type\n", + " Input should be 'wait' [type=literal_error, input_value='click', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.11/v/literal_error\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_362f0f22e6cf3e4d7b4beb474c63be5e in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: [Errno 28] No space left on device\n", + "Step failed: [Errno 28] No space left on device\n", + "Step failed: [Errno 28] No space left on device\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: [Errno 28] No space left on device\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: [Errno 28] No space left on device\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: litellm.InternalServerError: InternalServerError: OpenAIException - {\n", + " \"error\": {\n", + " \"message\": \"An error occurred while processing your request. You can retry your request, or contact us through our help center at help.openai.com if the error persists. Please include the request ID req_bd3347660b6e1902a36bbdbb20874e62 in your message.\",\n", + " \"type\": \"model_error\",\n", + " \"param\": null,\n", + " \"code\": null\n", + " }\n", + "}\n", + "Step failed: [Errno 28] No space left on device\n", + "Step failed: [Errno 28] No space left on device\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n", + "Step failed: [Errno 28] No space left on device\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Step failed: [Errno 28] No space left on device\n", + "Step failed: [Errno 28] No space left on device: 'trajectories\\\\osworld_95557e\\\\2025-08-28_open_claude-opus-4-20_032836_c76d\\\\turn_000\\\\0002_agent_response.json'\n", + "Tool evaluate has an output schema but did not return structured content. Continuing without structured content validation.\n", + "Tool evaluate has an output schema but did not return structured content. 
Continuing without structured content validation.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 19:46:30,722 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:46:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:46:31,394 - agent.ComputerAgent - INFO - Computer: click({'x': 753, 'y': 322})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 753, 'y': 322})\n", - "\u001b[92m19:46:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7188/7340 [260:13<5:30, 27.6 steps/min]2025-08-11 19:46:32,056 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 275, 'y': 152}, {'x': 81, 'y': 155}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 275, 'y': 152}, {'x': 81, 'y': 155}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7189/7340 [260:15<5:27, 27.6 steps/min]\u001b[92m19:46:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:34,051 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:46:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:46:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:46:34,715 - agent.ComputerAgent - INFO - Agent: Taking a screenshot to see the current computer screen.\n", - "INFO:agent.ComputerAgent:Agent: Taking a screenshot to see the current computer screen.\n", - "2025-08-11 19:46:34,716 - agent.ComputerAgent - INFO - Computer: click({'x': 266, 'y': 164})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 266, 'y': 164})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; 
provider = huggingface-local\n", - "\u001b[92m19:46:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7190/7340 [260:17<5:25, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:46:35,946 - agent.ComputerAgent - INFO - Computer: click({'x': 182, 'y': 110})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 182, 'y': 110})\n", - "\u001b[92m19:46:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:36,612 - agent.ComputerAgent - INFO - Computer: click({'x': 996, 'y': 732})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 996, 'y': 732})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7193/7340 [260:19<5:19, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7193/7340 [260:20<5:19, 27.6 steps/min]\u001b[92m19:46:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:39,437 - agent.ComputerAgent - INFO - Computer: click({'x': 244, 'y': 89})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 244, 'y': 89})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7193/7340 [260:21<5:19, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:41,619 - agent.ComputerAgent - INFO - Computer: click({'x': 922, 'y': 181})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 922, 'y': 181})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - " 
98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7194/7340 [260:23<5:17, 27.6 steps/min]2025-08-11 19:46:42,304 - agent.ComputerAgent - INFO - LLM processing started with 6 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 6 messages\n", - "\u001b[92m19:46:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:46:42,935 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:46:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:46:43,589 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:46:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:46:44,263 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:46:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:46:44,944 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing 
started with 14 messages\n", - "\u001b[92m19:46:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7195/7340 [260:26<5:14, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:46:46,301 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:46:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7195/7340 [260:28<5:14, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:46:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:47,001 - agent.ComputerAgent - INFO - Computer: double_click({'x': 213, 'y': 457})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 213, 'y': 457})\n", - "INFO:httpx:HTTP 
Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:46:48,346 - agent.ComputerAgent - INFO - Computer: type({'text': \"sudo mkdir -p /home/test1 && sudo chown root:root /home/test1 && sudo chmod 755 /home/test1 && echo 'dir ok'\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"sudo mkdir -p /home/test1 && sudo chown root:root /home/test1 && sudo chmod 755 /home/test1 && echo 'dir ok'\"})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7195/7340 [260:30<5:14, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7197/7340 [260:31<5:10, 27.6 steps/min]\u001b[92m19:46:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:50,156 - agent.ComputerAgent - INFO - Computer: click({'x': 430, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 430, 'y': 101})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7197/7340 [260:32<5:10, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:46:51,333 - 
agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:46:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:46:52,605 - agent.ComputerAgent - INFO - Computer: type({'text': 'Maturity Date'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Maturity Date'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:46:54,635 - agent.ComputerAgent - INFO - Computer: type({'text': 'kid3-cli -h'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'kid3-cli -h'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7198/7340 [260:36<5:08, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions 
\"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:46:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:57,297 - agent.ComputerAgent - INFO - Computer: type({'text': '20 cm'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '20 cm'})\n", - "\u001b[92m19:46:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7200/7340 [260:39<5:04, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:46:57,971 - agent.ComputerAgent - INFO - Computer: click({'x': 75, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 75, 'y': 178})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:46:58,633 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:46:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:46:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7201/7340 [260:40<5:01, 27.6 steps/min]2025-08-11 19:46:59,277 - agent.ComputerAgent - INFO - Computer: click({'x': 256, 'y': 155})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 256, 'y': 155})\n", - "\u001b[92m19:46:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:46:59,946 - agent.ComputerAgent - INFO - Computer: click({'x': 118, 'y': 181})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 118, 'y': 181})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7202/7340 [260:42<4:59, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7204/7340 [260:43<4:55, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:47:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:02,935 - agent.ComputerAgent - INFO - Computer: double_click({'x': 184, 'y': 105})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 184, 'y': 105})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7204/7340 [260:44<4:55, 27.6 steps/min]\u001b[92m19:47:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:03,599 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 991, 'y': 487}, {'x': 991, 'y': 416}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 991, 'y': 487}, {'x': 991, 'y': 416}]})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - 
\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7205/7340 [260:46<4:53, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:04,924 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:47:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7206/7340 [260:47<4:50, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:06,274 - agent.ComputerAgent - INFO - Computer: click({'x': 238, 'y': 310})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 238, 'y': 310})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:06,924 - agent.ComputerAgent - INFO - LLM processing started with 8 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 8 messages\n", - "\u001b[92m19:47:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7206/7340 [260:48<4:50, 27.6 steps/min]2025-08-11 19:47:07,564 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - 
"\u001b[92m19:47:07 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:08,229 - agent.ComputerAgent - INFO - Computer: click({'x': 229, 'y': 157})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 229, 'y': 157})\n", - "2025-08-11 19:47:08,884 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:47:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:09 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7207/7340 [260:51<4:48, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:10,222 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:47:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:10,864 - agent.ComputerAgent - INFO - LLM processing started with 24 
messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:47:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7208/7340 [260:53<4:46, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:12,222 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:47:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:12,865 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:47:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"2025-08-11 19:47:13,523 - agent.ComputerAgent - INFO - Computer: click({'x': 510, 'y': 449})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 510, 'y': 449})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:14,844 - agent.ComputerAgent - INFO - Computer: type({'text': 'Revenue'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'Revenue'})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7208/7340 [260:56<4:46, 27.6 steps/min]\u001b[92m19:47:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:16,856 - agent.ComputerAgent - INFO - Computer: click({'x': 145, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 145, 'y': 53})\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7210/7340 [260:58<4:42, 27.6 steps/min]Setting 
`pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:18,225 - agent.ComputerAgent - INFO - Computer: click({'x': 210, 'y': 185})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 210, 'y': 185})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:19,584 - agent.ComputerAgent - INFO - Agent: Yesβ€”you can make Thunderbird run filters automatically on subfolders (not just Inbox). 
Do the following once per account you want this behavior for.\n", - "\n", - "1) Turn on the hidden preference to run filters on non-Inbox folders\n", - "- Thunderbird > Settings > General > scroll to the bottom > Config Editor…\n", - "- In the search box, type: applyIncomingFilters\n", - "- If mail.server.default.applyIncomingFilters is false or missing, set it to true.\n", - "- If you prefer per-account control, also set mail.server.serverN.applyIncomingFilters = true for the specific account (N is the server number for that account; you can set both default and per-account if you like).\n", - "\n", - "2) Ensure Thunderbird actually checks those subfolders for new mail\n", - "- For each subfolder you want filters to run on: right‑click the folder > Properties > check β€œWhen getting new messages, always check this folder” > OK.\n", - "- Alternatively (account-wide): Account Settings > Server Settings (for the IMAP account) > enable β€œCheck for new messages at startup” and β€œCheck for new messages every X minutes.” In Advanced IMAP settings (or via Config Editor), ensure mail.server.serverN.check_all_folders_for_new = true to have Thunderbird poll all folders.\n", - "\n", - "3) Make sure your filters are set to run on incoming mail\n", - "- Tools (≑ menu) > Message Filters.\n", - "- Choose the account, edit each filter you want, and under β€œApply filter when,” tick the β€œGetting New Mail” options (before/after junk classification as appropriate). Save.\n", - "\n", - "4) Restart Thunderbird\n", - "- Close and reopen Thunderbird so the new prefs apply cleanly.\n", "\n", - "5) Test\n", - "- Deliver a message directly to a subfolder (e.g., by a server-side rule or by sending to an address that your server routes into that folder). 
When Thunderbird detects new mail in that subfolder, your filters should run automatically.\n", + "\u001b[92mβœ“ Job 'osworld 95557e openai/computer-use-preview+anthropic/claude-opus-4-20250514' complete!\u001b[0m \u001b[2mView all results at:\u001b[0m \u001b[1m\u001b[33mhttps://app.hud.so/jobs/fe05805d-4da9-4fc6-84b5-5c518528fd3c\u001b[0m\n", "\n", - "Notes and limitations\n", - "- IMAP required: This works when new mail arrives directly into that subfolder on the server (IMAP). For POP accounts and Local Folders, Thunderbird only β€œreceives” mail into Inbox; filters won’t auto-trigger simply because a message...\n", - "INFO:agent.ComputerAgent:Agent: Yesβ€”you can make Thunderbird run filters automatically on subfolders (not just Inbox). Do the following once per account you want this behavior for.\n", - "\n", - "1) Turn on the hidden preference to run filters on non-Inbox folders\n", - "- Thunderbird > Settings > General > scroll to the bottom > Config Editor…\n", - "- In the search box, type: applyIncomingFilters\n", - "- If mail.server.default.applyIncomingFilters is false or missing, set it to true.\n", - "- If you prefer per-account control, also set mail.server.serverN.applyIncomingFilters = true for the specific account (N is the server number for that account; you can set both default and per-account if you like).\n", - "\n", - "2) Ensure Thunderbird actually checks those subfolders for new mail\n", - "- For each subfolder you want filters to run on: right‑click the folder > Properties > check β€œWhen getting new messages, always check this folder” > OK.\n", - "- Alternatively (account-wide): Account Settings > Server Settings (for the IMAP account) > enable β€œCheck for new messages at startup” and β€œCheck for new messages every X minutes.” In Advanced IMAP settings (or via Config Editor), ensure mail.server.serverN.check_all_folders_for_new = true to have Thunderbird poll all folders.\n", - "\n", - "3) Make sure your filters are set to run on incoming 
mail\n", - "- Tools (≑ menu) > Message Filters.\n", - "- Choose the account, edit each filter you want, and under β€œApply filter when,” tick the β€œGetting New Mail” options (before/after junk classification as appropriate). Save.\n", - "\n", - "4) Restart Thunderbird\n", - "- Close and reopen Thunderbird so the new prefs apply cleanly.\n", - "\n", - "5) Test\n", - "- Deliver a message directly to a subfolder (e.g., by a server-side rule or by sending to an address that your server routes into that folder). When Thunderbird detects new mail in that subfolder, your filters should run automatically.\n", - "\n", - "Notes and limitations\n", - "- IMAP required: This works when new mail arrives directly into that subfolder on the server (IMAP). For POP accounts and Local Folders, Thunderbird only β€œreceives” mail into Inbox; filters won’t auto-trigger simply because a message...\n", - "2025-08-11 19:47:20,255 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 3021\n", - " - prompt_tokens: 3980\n", - " - total_tokens: 7001\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2496\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 1920\n", - " - response_cost: $0.0330\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 3021\n", - " - prompt_tokens: 3980\n", - " - total_tokens: 7001\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 2496\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 1920\n", - " - response_cost: $0.0330\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7212/7340 [261:01<4:37, 27.6 steps/min]INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:21,621 - agent.ComputerAgent - INFO - Computer: type({'text': ' chrome refresh 2023'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': ' chrome refresh 2023'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:22,286 - agent.ComputerAgent - INFO - Computer: click({'x': 239, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 239, 'y': 178})\n", - "2025-08-11 19:47:22,928 - agent.ComputerAgent - INFO - Computer: click({'x': 980, 'y': 60})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 980, 'y': 60})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - " 
98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7213/7340 [261:04<4:35, 27.6 steps/min]2025-08-11 19:47:23,594 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:47:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:24,237 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:47:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7216/7340 [261:06<4:29, 27.6 steps/min]2025-08-11 19:47:24,914 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:47:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:26,264 - agent.ComputerAgent - INFO - Agent: The Impress slide was exported to PNG using the default settings and saved as β€œres.png” on the Desktop. Task completed.\n", - "INFO:agent.ComputerAgent:Agent: The Impress slide was exported to PNG using the default settings and saved as β€œres.png” on the Desktop. 
Task completed.\n", - "2025-08-11 19:47:26,906 - agent.ComputerAgent - INFO - Total usage:\n", - " - completion_tokens: 162\n", - " - prompt_tokens: 8778\n", - " - total_tokens: 8940\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 128\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 6656\n", - " - response_cost: $0.0051\n", - "INFO:agent.ComputerAgent:Total usage:\n", - " - completion_tokens: 162\n", - " - prompt_tokens: 8778\n", - " - total_tokens: 8940\n", - " - completion_tokens_details:\n", - " - accepted_prediction_tokens: 0\n", - " - audio_tokens: 0\n", - " - reasoning_tokens: 128\n", - " - rejected_prediction_tokens: 0\n", - " - prompt_tokens_details:\n", - " - audio_tokens: 0\n", - " - cached_tokens: 6656\n", - " - response_cost: $0.0051\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cd53d966-2507-485c-bbe9-ee55dbbeddd0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/5e73167c-1836-4752-b7e8-57434e5d7875/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7217/7340 [261:09<4:27, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/25f45afe-ee57-4629-9991-c515438accab/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - " 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7217/7340 [261:10<4:27, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cd53d966-2507-485c-bbe9-ee55dbbeddd0/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:30,124 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:47:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7233/7340 [261:12<3:51, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cd53d966-2507-485c-bbe9-ee55dbbeddd0/close \"HTTP/1.1 200 OK\"\n", - "Setting 
`pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:31,452 - agent.ComputerAgent - INFO - LLM processing started with 10 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 10 messages\n", - "\u001b[92m19:47:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:32,084 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:47:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7233/7340 [261:13<3:51, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:32,764 - agent.ComputerAgent - INFO - Computer: click({'x': 76, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 76, 'y': 53})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:33,390 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:47:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7235/7340 [261:15<3:47, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/6f3b006b-141d-439d-b6cb-eed7bd6483c3/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7245/7340 [261:16<3:25, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9882ec8e-4618-4be3-802e-bb5c58c9fbbc/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7245/7340 [261:17<3:25, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7245/7340 [261:18<3:25, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:37,446 - agent.ComputerAgent - INFO - Computer: click({'x': 901, 'y': 579})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 901, 'y': 579})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:47:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7245/7340 [261:20<3:25, 27.7 steps/min]\u001b[92m19:47:38 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:39,450 - agent.ComputerAgent - INFO - Computer: click({'x': 1011, 'y': 194})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1011, 'y': 194})\n", - "INFO:httpx:HTTP Request: POST 
https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:47:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7246/7340 [261:21<3:23, 27.7 steps/min]\u001b[92m19:47:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:40,815 - agent.ComputerAgent - INFO - Computer: click({'x': 243, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 243, 'y': 52})\n", - "\u001b[92m19:47:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:41,487 - agent.ComputerAgent - INFO - Computer: click({'x': 259, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 259, 'y': 178})\n", - "\u001b[92m19:47:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7247/7340 [261:23<3:21, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:42,158 - agent.ComputerAgent - INFO - Computer: click({'x': 893, 'y': 296})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 893, 'y': 296})\n", - "2025-08-11 19:47:42,787 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:47:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7250/7340 [261:28<3:14, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:48,590 - agent.ComputerAgent - INFO - LLM processing started with 12 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 12 messages\n", - "\u001b[92m19:47:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7250/7340 [261:30<3:14, 27.7 steps/min]\u001b[92m19:47:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:49,265 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:47:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:47:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7250/7340 [261:31<3:14, 27.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:50,594 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:47:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:47:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:51,304 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:47:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:47:51,990 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 275, 'y': 181}, {'x': 79, 'y': 182}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 275, 'y': 181}, {'x': 79, 'y': 182}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7250/7340 [261:33<3:14, 27.7 steps/min]\u001b[92m19:47:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:52,662 - agent.ComputerAgent - INFO - Computer: click({'x': 252, 
'y': 230})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 252, 'y': 230})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7252/7340 [261:35<3:10, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:56,131 - agent.ComputerAgent - INFO - Computer: type({'text': '=A2+B2'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': '=A2+B2'})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7252/7340 [261:37<3:10, 27.7 steps/min]\u001b[92m19:47:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:47:56,793 - agent.ComputerAgent - INFO - Computer: click({'x': 563, 'y': 101})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 563, 'y': 101})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:47:58,146 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'alt+left'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'alt+left'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: 
POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:47:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7253/7340 [261:40<3:08, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:47:59,467 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:47:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:48:00,135 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:48:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:48:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7254/7340 [261:41<3:06, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:00,837 - agent.ComputerAgent - INFO - Computer: click({'x': 111, 'y': 52})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 111, 'y': 52})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7254/7340 [261:42<3:06, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/ba3f752c-7268-49e8-819c-5b471e52bd54/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7255/7340 [261:44<3:04, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7255/7340 [261:45<3:04, 27.7 steps/min]\u001b[92m19:48:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:04,749 - agent.ComputerAgent - INFO - Computer: click({'x': 19, 'y': 45})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 19, 'y': 45})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7255/7340 [261:47<3:04, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:48:06,105 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:48:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:48:06,746 - agent.ComputerAgent - INFO - LLM processing started with 14 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 14 messages\n", - "\u001b[92m19:48:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7256/7340 [261:48<3:01, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:48:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:07,447 - agent.ComputerAgent - INFO - Computer: click({'x': 273, 'y': 90})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 273, 
'y': 90})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7256/7340 [261:49<3:01, 27.7 steps/min]2025-08-11 19:48:08,626 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:48:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7257/7340 [261:51<2:59, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7257/7340 [261:52<2:59, 27.7 steps/min]2025-08-11 19:48:11,495 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:48:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - "\u001b[92m19:48:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:48:11 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7257/7340 [261:53<2:59, 27.7 steps/min]\u001b[92m19:48:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:12,687 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 211, 'y': 178}, {'x': 211, 'y': 473}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 211, 'y': 178}, {'x': 211, 'y': 473}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7257/7340 [261:54<2:59, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:14,357 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:48:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7258/7340 [261:56<2:57, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7263/7340 [261:57<2:46, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:17,188 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/40c5f987-3d81-47fe-8798-4e45d9755f93/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7263/7340 [261:59<2:46, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "2025-08-11 19:48:19,057 - agent.ComputerAgent - INFO - Computer: click({'x': 257, 'y': 152})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 257, 'y': 152})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7265/7340 [262:03<2:42, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:23,287 - agent.ComputerAgent - INFO - LLM processing started with 16 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 16 messages\n", - "\u001b[92m19:48:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:48:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/31367309-0055-409a-a992-edf729fb010c/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7265/7340 [262:05<2:42, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:48:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:48:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7265/7340 [262:07<2:42, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:48:25,935 - agent.ComputerAgent - INFO - Computer: click({'x': 95, 'y': 74})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 95, 'y': 74})\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:48:26,606 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:48:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7266/7340 [262:08<2:40, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:48:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:27,282 - agent.ComputerAgent - INFO - Computer: click({'x': 986, 'y': 133})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 986, 'y': 133})\n", - "\u001b[92m19:48:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:27,901 - agent.ComputerAgent - INFO - Computer: click({'x': 528, 'y': 50})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 528, 'y': 50})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7267/7340 [262:09<2:38, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/68f513cf-ec21-4216-bab9-84c5bfcfce88/close \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7269/7340 [262:10<2:33, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end 
generation.\n", - "\u001b[92m19:48:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:30,287 - agent.ComputerAgent - INFO - Computer: click({'x': 105, 'y': 230})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 105, 'y': 230})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:14<2:31, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:33,466 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:48:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:15<2:31, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:34,163 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:48:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "2025-08-11 19:48:34,825 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:48:34 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:16<2:31, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:36,005 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:48:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:17<2:31, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:21<2:31, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 
[262:22<2:31, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:48:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:48:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:48:43,277 - agent.ComputerAgent - INFO - Computer: click({'x': 66, 'y': 164})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 66, 'y': 164})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7270/7340 [262:25<2:31, 27.7 steps/min]\u001b[92m19:48:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:48:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - 
"\u001b[92m19:48:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:48:44,471 - agent.ComputerAgent - INFO - Computer: click({'x': 248, 'y': 173})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 248, 'y': 173})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7271/7340 [262:26<2:29, 27.7 steps/min]\u001b[92m19:48:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:45,143 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 288, 'y': 153}, {'x': 81, 'y': 155}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 288, 'y': 153}, {'x': 81, 'y': 155}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7273/7340 [262:29<2:25, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7273/7340 [262:30<2:25, 27.7 steps/min]2025-08-11 19:48:49,336 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:48:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7273/7340 [262:31<2:25, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:50,547 - agent.ComputerAgent - INFO - LLM processing started with 18 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 18 messages\n", - "\u001b[92m19:48:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:51,906 - agent.ComputerAgent - INFO - Computer: type({'text': 'webui refresh 2023'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'webui refresh 2023'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7273/7340 [262:33<2:25, 27.7 steps/min]2025-08-11 19:48:52,576 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:48:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:48:53,923 - agent.ComputerAgent - INFO - Computer: type({'text': 'Thunderbird'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 
'Thunderbird'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7274/7340 [262:36<2:22, 27.7 steps/min]\u001b[92m19:48:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:48:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:48:56,172 - agent.ComputerAgent - INFO - Computer: double_click({'x': 144, 'y': 167})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 144, 'y': 167})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7276/7340 [262:39<2:18, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:48:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7276/7340 [262:40<2:18, 27.7 steps/min]\u001b[92m19:48:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:00,051 - agent.ComputerAgent - INFO - Computer: click({'x': 761, 'y': 229})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 761, 'y': 229})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:00,717 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:49:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7276/7340 [262:42<2:18, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7277/7340 [262:43<2:16, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:49:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:02 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7277/7340 [262:44<2:16, 27.7 steps/min]2025-08-11 19:49:03,565 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:49:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:49:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:04,222 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 237, 'y': 178}, {'x': 258, 'y': 280}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 237, 'y': 178}, {'x': 258, 'y': 280}]})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7277/7340 [262:45<2:16, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/fa081188-4859-4858-9d33-0f9675111182/close \"HTTP/1.1 200 OK\"\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:46<2:14, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:06,401 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:49:06 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:48<2:14, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:51<2:14, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:10,616 - agent.ComputerAgent - INFO - LLM processing started with 20 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 20 messages\n", - "\u001b[92m19:49:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:52<2:14, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:53<2:14, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - 
"\u001b[92m19:49:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:12 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:55<2:14, 27.7 steps/min]\u001b[92m19:49:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:49:14,305 - agent.ComputerAgent - INFO - Computer: click({'x': 1008, 'y': 223})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1008, 'y': 223})\n", - "\u001b[92m19:49:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:49:14,966 - agent.ComputerAgent - INFO - Computer: click({'x': 75, 'y': 135})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 75, 'y': 135})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7278/7340 [262:56<2:14, 27.7 steps/min]\u001b[92m19:49:15 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:15,652 - agent.ComputerAgent - INFO - Computer: double_click({'x': 540, 'y': 128})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 540, 'y': 128})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7281/7340 [262:58<2:07, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7281/7340 [262:59<2:07, 27.7 steps/min]\u001b[92m19:49:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:18,506 - agent.ComputerAgent - INFO - Computer: click({'x': 229, 'y': 157})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 229, 'y': 
157})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7281/7340 [263:00<2:07, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:20,217 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:49:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7282/7340 [263:02<2:05, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7282/7340 [263:03<2:05, 27.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:49:23,091 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:49:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "\u001b[92m19:49:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7282/7340 [263:04<2:05, 27.7 steps/min]2025-08-11 19:49:23,787 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 178})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:24,438 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:49:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:49:25,775 - agent.ComputerAgent - INFO - Computer: type({'text': \"=VLOOKUP(C2,'Retail Price'.$A$2:$B$200,2,0)*E2*(1-F2)\"})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': \"=VLOOKUP(C2,'Retail Price'.$A$2:$B$200,2,0)*E2*(1-F2)\"})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7282/7340 [263:07<2:05, 27.7 steps/min]2025-08-11 19:49:26,452 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 
messages\n", - "\u001b[92m19:49:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7284/7340 [263:12<2:01, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:31,690 - agent.ComputerAgent - INFO - LLM processing started with 22 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 22 messages\n", - "\u001b[92m19:49:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7284/7340 [263:14<2:01, 27.7 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:49:33,032 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:49:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:49:33 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:33,738 - agent.ComputerAgent - INFO - Computer: click({'x': 1004, 'y': 60})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 1004, 'y': 60})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7285/7340 [263:16<1:59, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7285/7340 [263:17<1:59, 27.7 steps/min]\u001b[92m19:49:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:36,569 - agent.ComputerAgent - INFO - Computer: click({'x': 980, 'y': 60})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 980, 'y': 60})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:20<1:57, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 
200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:21<1:57, 27.7 steps/min]2025-08-11 19:49:40,273 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:49:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:22<1:57, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:42,639 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'CTRL+H'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'CTRL+H'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:24<1:57, 27.7 steps/min]2025-08-11 19:49:43,312 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:49:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:25<1:57, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:49:44 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:26<1:57, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:49:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:27<1:57, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:49:47,535 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "\u001b[92m19:49:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - 
\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:49:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:48,871 - agent.ComputerAgent - INFO - Computer: screenshot({})\n", - "INFO:agent.ComputerAgent:Computer: screenshot({})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7286/7340 [263:30<1:57, 27.6 steps/min]2025-08-11 19:49:49,547 - agent.ComputerAgent - INFO - Computer: click({'x': 259, 'y': 180})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 259, 'y': 180})\n", - "2025-08-11 19:49:50,246 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 55, 'y': 164}, {'x': 55, 'y': 600}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 55, 'y': 164}, {'x': 55, 'y': 600}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7291/7340 [263:33<1:46, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/8e75deb1-3c97-408b-8c7d-f4681b322141/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7291/7340 [263:34<1:46, 27.7 
steps/min]\u001b[92m19:49:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 19:49:53,552 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:49:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:49:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:49:54,210 - agent.ComputerAgent - INFO - Computer: click({'x': 867, 'y': 296})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 867, 'y': 296})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7291/7340 [263:35<1:46, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 
19:49:55,903 - agent.ComputerAgent - INFO - LLM processing started with 24 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 24 messages\n", - "\u001b[92m19:49:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:37<1:44, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:49:56,562 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:49:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:49:57,261 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:49:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:39<1:44, 27.7 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:41<1:44, 27.7 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:42<1:44, 27.7 steps/min]2025-08-11 19:50:00,993 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:50:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:49<1:44, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:08,913 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ctrl+c'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ctrl+c'})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:50<1:44, 27.6 steps/min]2025-08-11 19:50:10,074 - agent.ComputerAgent - INFO - LLM processing started with 26 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 26 messages\n", - "\u001b[92m19:50:10 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:51<1:44, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:52<1:44, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:12 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:54<1:44, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:55<1:44, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:50:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:50:14,024 - agent.ComputerAgent - INFO - Computer: click({'x': 871, 'y': 135})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 871, 'y': 135})\n", - "\u001b[92m19:50:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:50:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7292/7340 [263:56<1:44, 27.6 steps/min]\u001b[92m19:50:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:15,204 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 275, 'y': 152}, {'x': 79, 'y': 154}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 275, 'y': 152}, {'x': 79, 'y': 154}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7293/7340 [263:57<1:42, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:17,610 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ALT+TAB'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ALT+TAB'})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7294/7340 [263:59<1:39, 27.6 steps/min]2025-08-11 19:50:18,781 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:50:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7294/7340 [264:00<1:39, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7294/7340 [264:01<1:39, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:50:21,132 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:50:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7294/7340 [264:02<1:39, 27.6 steps/min]2025-08-11 19:50:22,169 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:50:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7294/7340 [264:03<1:39, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = 
openai\n", - "\u001b[92m19:50:22 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:22,830 - agent.ComputerAgent - INFO - Computer: click({'x': 120, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 120, 'y': 53})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:24,191 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 257, 'y': 153}, {'x': 259, 'y': 281}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 257, 'y': 153}, {'x': 259, 'y': 281}]})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7296/7340 [264:08<1:35, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7296/7340 [264:09<1:35, 27.6 steps/min]\u001b[92m19:50:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:29,058 - agent.ComputerAgent - INFO - Computer: click({'x': 749, 'y': 229})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 749, 'y': 229})\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7296/7340 [264:10<1:35, 27.6 steps/min]2025-08-11 19:50:29,733 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:50:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:50:30,431 - agent.ComputerAgent - INFO - LLM processing started with 28 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 28 messages\n", - "\u001b[92m19:50:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7297/7340 [264:12<1:33, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7297/7340 [264:16<1:33, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7297/7340 [264:17<1:33, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:50:36,275 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:50:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:50:36 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:37,340 - agent.ComputerAgent - INFO - Computer: click({'x': 229, 'y': 91})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 229, 'y': 91})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:21<1:31, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:41,280 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ALT+TAB'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ALT+TAB'})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:23<1:31, 27.6 steps/min]2025-08-11 19:50:42,453 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:50:42 - LiteLLM:INFO\u001b[0m: 
utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:24<1:31, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:25<1:31, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:44,654 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:50:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:26<1:31, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:28<1:31, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:29<1:31, 27.6 steps/min]\u001b[92m19:50:48 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:48,511 - agent.ComputerAgent - INFO - Computer: click({'x': 243, 'y': 178})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 243, 'y': 178})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7298/7340 [264:30<1:31, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7299/7340 [264:31<1:29, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:50:50 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:50,849 - agent.ComputerAgent - INFO - Computer: click({'x': 138, 'y': 90})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 138, 'y': 90})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7299/7340 [264:32<1:29, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7300/7340 [264:33<1:26, 27.6 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:50:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7300/7340 [264:34<1:26, 27.6 steps/min]\u001b[92m19:50:53 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:53,680 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 749, 'y': 183}, {'x': 837, 'y': 244}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 749, 'y': 183}, {'x': 837, 'y': 244}]})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:50:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider 
= huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7300/7340 [264:36<1:26, 27.6 steps/min]\u001b[92m19:50:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:50:55,684 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:50:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:50:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:50:56,394 - agent.ComputerAgent - INFO - Computer: click({'x': 258, 'y': 155})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 258, 'y': 155})\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7301/7340 [264:38<1:24, 27.6 steps/min]\u001b[92m19:50:56 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= 
HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:50:57,053 - agent.ComputerAgent - INFO - Computer: click({'button': 'right', 'x': 118, 'y': 182})\n", - "INFO:agent.ComputerAgent:Computer: click({'button': 'right', 'x': 118, 'y': 182})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:50:57,705 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:50:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7302/7340 [264:39<1:22, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7303/7340 [264:42<1:20, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:01,944 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:51:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7303/7340 [264:43<1:20, 27.6 
steps/min]2025-08-11 19:51:02,644 - agent.ComputerAgent - INFO - LLM processing started with 30 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 30 messages\n", - "\u001b[92m19:51:02 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7303/7340 [264:44<1:20, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:04,313 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:51:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7303/7340 [264:46<1:20, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - " 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7303/7340 [264:47<1:20, 27.6 steps/min]\u001b[92m19:51:05 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:06,170 - agent.ComputerAgent - INFO - Computer: click({'x': 285, 'y': 98})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 285, 'y': 98})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7304/7340 [264:53<1:18, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:12,936 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:51:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7304/7340 [264:54<1:18, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7304/7340 [264:57<1:18, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:51:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7304/7340 [264:59<1:18, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:17 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:51:18,032 - agent.ComputerAgent - INFO - Computer: click({'x': 259, 'y': 180})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 259, 'y': 180})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7304/7340 [265:00<1:18, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:51:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:51:19,864 - agent.ComputerAgent - INFO - Computer: click({'x': 151, 'y': 232})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 151, 'y': 232})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7305/7340 [265:01<1:16, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:51:21,195 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 298, 'y': 152}, {'x': 81, 'y': 155}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 298, 'y': 152}, {'x': 81, 'y': 155}]})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7306/7340 [265:02<1:14, 27.6 steps/min]\u001b[92m19:51:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:21,882 - agent.ComputerAgent - INFO - Computer: click({'x': 
59, 'y': 157})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 59, 'y': 157})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7307/7340 [265:03<1:11, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7308/7340 [265:04<1:09, 27.6 steps/min]2025-08-11 19:51:23,543 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:51:23 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7308/7340 [265:06<1:09, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7308/7340 [265:07<1:09, 27.6 steps/min]\u001b[92m19:51:26 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 
19:51:26,961 - agent.ComputerAgent - INFO - Computer: click({'x': 762, 'y': 230})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 762, 'y': 230})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:27,615 - agent.ComputerAgent - INFO - LLM processing started with 32 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 32 messages\n", - "\u001b[92m19:51:27 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7308/7340 [265:09<1:09, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:28,303 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:51:28 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:51:28,994 - agent.ComputerAgent - INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:51:29 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:15<1:07, 27.6 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:35,225 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:51:35 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:17<1:07, 27.6 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:18<1:07, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:38,065 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'ALT+TAB'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'ALT+TAB'})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:19<1:07, 27.5 steps/min]2025-08-11 19:51:39,205 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:51:39 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:21<1:07, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; 
provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:22<1:07, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7309/7340 [265:23<1:07, 27.5 steps/min]\u001b[92m19:51:41 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:42,081 - agent.ComputerAgent - INFO - Computer: click({'x': 237, 'y': 95})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 237, 'y': 95})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7310/7340 [265:25<1:05, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:44 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7310/7340 [265:26<1:05, 27.5 steps/min]\u001b[92m19:51:44 - 
LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7310/7340 [265:27<1:05, 27.5 steps/min]\u001b[92m19:51:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:51:45 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:51:46 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:46,954 - agent.ComputerAgent - INFO - Computer: drag({'start_element_description': 'Cell C2', 'end_element_description': 'Cell C10', 'x': 268, 'y': 188})\n", - "INFO:agent.ComputerAgent:Computer: drag({'start_element_description': 'Cell C2', 'end_element_description': 'Cell C10', 'x': 268, 'y': 188})\n", - 
"100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7311/7340 [265:29<1:03, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 19:51:48,615 - agent.ComputerAgent - INFO - LLM processing started with 35 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 35 messages\n", - "\u001b[92m19:51:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7311/7340 [265:30<1:03, 27.5 steps/min]2025-08-11 19:51:49,299 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:51:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7311/7340 [265:31<1:03, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7311/7340 [265:32<1:03, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "\u001b[92m19:51:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:52,164 - agent.ComputerAgent - INFO - Computer: click({'x': 87, 'y': 166})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 87, 'y': 166})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7312/7340 [265:33<1:01, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:52,805 - agent.ComputerAgent - INFO - LLM processing started with 37 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 37 messages\n", - "\u001b[92m19:51:52 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7313/7340 [265:35<0:58, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:54 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7313/7340 [265:36<0:58, 27.5 steps/min]\u001b[92m19:51:55 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:51:55,701 - agent.ComputerAgent - INFO - Computer: double_click({'x': 841, 'y': 244})\n", - "INFO:agent.ComputerAgent:Computer: double_click({'x': 841, 'y': 244})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7313/7340 [265:37<0:58, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7315/7340 [265:38<0:54, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:51:57,894 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m19:51:57 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "2025-08-11 19:51:58,566 - agent.ComputerAgent - 
INFO - LLM processing started with 34 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 34 messages\n", - "\u001b[92m19:51:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7315/7340 [265:40<0:54, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:51:59 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7315/7340 [265:41<0:54, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:52:00 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:00,770 - agent.ComputerAgent - INFO - Computer: click({'x': 422, 'y': 360})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 422, 'y': 360})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7315/7340 [265:42<0:54, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:01,926 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 
40 messages\n", - "\u001b[92m19:52:01 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7316/7340 [265:43<0:52, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7317/7340 [265:44<0:50, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:03 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7317/7340 [265:45<0:50, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:04,815 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m19:52:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:52:04 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:05,539 - agent.ComputerAgent - INFO - Computer: click({'x': 15, 'y': 526})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 15, 'y': 526})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7317/7340 [265:47<0:50, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:07,328 - agent.ComputerAgent - INFO - Computer: type({'text': 'contacts.csv'})\n", - "INFO:agent.ComputerAgent:Computer: type({'text': 'contacts.csv'})\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7319/7340 [265:49<0:45, 27.5 steps/min]2025-08-11 19:52:07,988 - agent.ComputerAgent - INFO - LLM processing started with 36 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 36 messages\n", - "\u001b[92m19:52:08 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7320/7340 [265:50<0:43, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:09,185 - agent.ComputerAgent - INFO - LLM processing started with 43 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 43 messages\n", - "\u001b[92m19:52:09 
- LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7320/7340 [265:53<0:43, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7320/7340 [265:54<0:43, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7321/7340 [265:55<0:41, 27.5 steps/min]2025-08-11 19:52:13,898 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:52:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7321/7340 [265:58<0:41, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7321/7340 [265:59<0:41, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:52:18 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/9053f5ae-149f-4a73-a89e-977f3e750435/close \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7325/7340 [266:00<0:32, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/26dc2412-0699-4a4e-a272-dc576348a5c8/close \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() 
model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:52:19 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7325/7340 [266:01<0:32, 27.5 steps/min]\u001b[92m19:52:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:20,887 - agent.ComputerAgent - INFO - Computer: drag({'start_element_description': 'column header A', 'end_element_description': 'column header H', 'x': 90, 'y': 155})\n", - "INFO:agent.ComputerAgent:Computer: drag({'start_element_description': 'column header A', 'end_element_description': 'column header H', 'x': 90, 'y': 155})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7326/7340 [266:02<0:30, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:23,480 - agent.ComputerAgent - INFO - Computer: keypress({'keys': 'enter'})\n", - "INFO:agent.ComputerAgent:Computer: keypress({'keys': 'enter'})\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7326/7340 
[266:05<0:30, 27.5 steps/min]\u001b[92m19:52:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No screenshot found, taking screenshot\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-08-11 19:52:24,777 - agent.ComputerAgent - INFO - LLM processing started with 37 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 37 messages\n", - "\u001b[92m19:52:24 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7327/7340 [266:06<0:28, 27.5 steps/min]\u001b[92m19:52:25 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:26,307 - agent.ComputerAgent - INFO - Computer: click({'x': 828, 'y': 35})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 828, 'y': 35})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7328/7340 [266:10<0:26, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7329/7340 [266:11<0:23, 27.5 steps/min]INFO:httpx:HTTP Request: POST 
https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:30,881 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:52:30 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7329/7340 [266:12<0:23, 27.5 steps/min]2025-08-11 19:52:31,557 - agent.ComputerAgent - INFO - LLM processing started with 39 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 39 messages\n", - "\u001b[92m19:52:31 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7329/7340 [266:13<0:23, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:32,766 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 40 messages\n", - "\u001b[92m19:52:32 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - 
"100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7329/7340 [266:19<0:23, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7330/7340 [266:20<0:21, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7330/7340 [266:21<0:21, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:52:40,663 - agent.ComputerAgent - INFO - LLM processing started with 41 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 41 messages\n", - "\u001b[92m19:52:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:52:40 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:41,739 - agent.ComputerAgent - INFO - Computer: click({'x': 328, 'y': 286})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 328, 'y': 286})\n", 
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7331/7340 [266:26<0:19, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7332/7340 [266:27<0:17, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:46,968 - agent.ComputerAgent - INFO - LLM processing started with 43 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 43 messages\n", - "\u001b[92m19:52:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7332/7340 [266:29<0:17, 27.5 steps/min]\u001b[92m19:52:47 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "2025-08-11 19:52:48,348 - agent.ComputerAgent - INFO - LLM processing started with 38 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 38 messages\n", - "\u001b[92m19:52:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM 
completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "\u001b[92m19:52:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "\u001b[92m19:52:48 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7332/7340 [266:30<0:17, 27.5 steps/min]\u001b[92m19:52:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:49,902 - agent.ComputerAgent - INFO - Computer: drag({'path': [{'x': 749, 'y': 229}, {'x': 749, 'y': 732}]})\n", - "INFO:agent.ComputerAgent:Computer: drag({'path': [{'x': 749, 'y': 229}, {'x': 749, 'y': 732}]})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7332/7340 [266:31<0:17, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:52:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = 
huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7334/7340 [266:32<0:13, 27.5 steps/min]Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:52:51 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:52:52,289 - agent.ComputerAgent - INFO - Computer: click({'x': 17, 'y': 386})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 17, 'y': 386})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7334/7340 [266:34<0:13, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:37<0:10, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:38<0:10, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/invoke \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b14fe395-5fa2-43f0-9d0b-23c42f3e9093/close \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:39<0:10, 27.5 steps/min]INFO:httpx:HTTP 
Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:52:58,539 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:52:58 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:40<0:10, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:41<0:10, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/cb64a220-43d8-4373-bd2a-e73bacb4a122/close \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:54<0:10, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:53:13 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7335/7340 [266:55<0:10, 27.5 steps/min]\u001b[92m19:53:14 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - 
"LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:53:14,687 - agent.ComputerAgent - INFO - Computer: click({'x': 318, 'y': 306})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 318, 'y': 306})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7336/7340 [266:57<0:08, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:53:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7336/7340 [266:58<0:08, 27.5 steps/min]\u001b[92m19:53:16 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:53:17,018 - agent.ComputerAgent - INFO - Computer: click({'x': 49, 'y': 53})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 49, 'y': 53})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:01<0:06, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "2025-08-11 19:53:20,724 - agent.ComputerAgent - INFO - LLM processing started with 40 messages\n", - "INFO:agent.ComputerAgent:LLM 
processing started with 40 messages\n", - "\u001b[92m19:53:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:02<0:06, 27.5 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:03<0:06, 27.5 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:23<0:06, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:53:42 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to `eos_token_id`:151645 for open-end generation.\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:24<0:06, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/invoke \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:53:43 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:53:43,990 - agent.ComputerAgent - INFO - Computer: click({'x': 432, 
'y': 314})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 432, 'y': 314})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7337/7340 [267:25<0:06, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/b4eee866-c191-4acf-b232-9b18a3c888ef/close \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7338/7340 [267:29<0:04, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7338/7340 [267:30<0:04, 27.4 steps/min]2025-08-11 19:53:49,710 - agent.ComputerAgent - INFO - LLM processing started with 42 messages\n", - "INFO:agent.ComputerAgent:LLM processing started with 42 messages\n", - "\u001b[92m19:53:49 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= gpt-5; provider = openai\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7338/7340 [268:00<0:04, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "\u001b[92m19:54:20 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7338/7340 [268:02<0:04, 27.4 steps/min]INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "Setting `pad_token_id` to 
`eos_token_id`:151645 for open-end generation.\n", - "\u001b[92m19:54:21 - LiteLLM:INFO\u001b[0m: utils.py:3258 - \n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "INFO:LiteLLM:\n", - "LiteLLM completion() model= HelloKKMe/GTA1-7B; provider = huggingface-local\n", - "2025-08-11 19:54:21,613 - agent.ComputerAgent - INFO - Computer: click({'x': 469, 'y': 487})\n", - "INFO:agent.ComputerAgent:Computer: click({'x': 469, 'y': 487})\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7339/7340 [268:08<0:02, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ-| 7339/7340 [268:12<0:02, 27.4 steps/min]INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/invoke \"HTTP/1.1 200 OK\"\n", - "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 7340/7340 [268:13<0:00, 27.4 steps/min]\n", - "INFO:httpx:HTTP Request: POST https://orchestration.hud.so/hud-gym/api/v2/environments/d71be89e-00e2-40e7-8b8d-38e36bc6d26c/close \"HTTP/1.1 200 OK\"\n", - "INFO:httpx:HTTP Request: GET https://orchestration.hud.so/hud-gym/api/v2/jobs/a2c1347a-2925-45ed-b86a-6b475b0dc4eb/trajectories \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'task_count': 360, 'avg_reward': 0.21677517254432735, 'success_rate': 18.333333333333332}\n", - "View results at: https://app.hud.so/jobs/a2c1347a-2925-45ed-b86a-6b475b0dc4eb\n" + "Job: osworld 95557e openai/computer-use-preview+anthropic/claude-opus-4-20250514\n", + "Total results: 369\n" ] } ], "source": [ - "from 
agent.integrations.hud import run_job\n", - "from hud import load_taskset\n", - "from hud.taskset import TaskSet\n", - "import logging\n", "import uuid\n", + "from agent.integrations.hud import run_full_dataset\n", "\n", - "# Load taskset\n", - "taskset = await load_taskset(\"OSWorld-Verified\")\n", - "# taskset = TaskSet(tasks=taskset[:20]) # limit to 10 tasks instead of all 370\n", + "models_to_test = [\n", + " \"openai/computer-use-preview+anthropic/claude-opus-4-20250514\",\n", + "]\n", + " \n", "\n", - "job_name = \"osworld-gta-gpt5\"\n", - "job_name = f\"{job_name}-{str(uuid.uuid4())[:4]}\"\n", + "for model in models_to_test:\n", + " # Full dataset evaluation (runs via HUD's run_dataset under the hood)\n", + " job_uuid = str(uuid.uuid4())[:6]\n", + " job_name = f\"osworld {job_uuid} {model}\"\n", "\n", - "# Run benchmark job\n", - "job = await run_job(\n", - " # model=\"openai/computer-use-preview\",\n", - " model=\"huggingface-local/HelloKKMe/GTA1-7B+openai/gpt-5\",\n", - " task_or_taskset=taskset,\n", - " job_name=job_name,\n", - " max_concurrent_tasks=20,\n", - " # add any extra ComputerAgent kwargs:\n", - " verbosity=logging.INFO, # Enable logging\n", - " trajectory_dir=f\"trajectories/{job_name}\" # Save trajectories locally\n", - ")\n", + " results = await run_full_dataset(\n", + " dataset=\"hud-evals/OSWorld-Verified-XLang\",\n", + " job_name=job_name, \n", + " model=model,\n", + " max_concurrent=20, \n", + " max_steps=75,\n", + " trajectory_dir=f\"trajectories/osworld_{job_uuid}\",\n", + " only_n_most_recent_images=3\n", + " )\n", "\n", - "# Get results OR view them at app.hud.so\n", - "print(await job.get_analytics())\n", - "print(f\"View results at: https://app.hud.so/jobs/{job.id}\")" + " # results is a list from hud.datasets.run_dataset; inspect/aggregate as needed\n", + " print(f\"Job: {job_name}\")\n", + " print(f\"Total results: {len(results)}\")" ] } ], diff --git a/pyproject.toml b/pyproject.toml index 215809a5..7405d286 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ description = "CUA (Computer Use Agent) mono-repo" license = { text = "MIT" } name = "cua-workspace" readme = "README.md" -requires-python = ">=3.12" +requires-python = "<3.14,>=3.12" version = "0.1.0" [project.urls] diff --git a/samples/community/global-online/README.md b/samples/community/global-online/README.md new file mode 100644 index 00000000..137cebe6 --- /dev/null +++ b/samples/community/global-online/README.md @@ -0,0 +1,3 @@ +# Global Online Hackathon Submission + +In construction πŸ— ️ \ No newline at end of file diff --git a/samples/community/hack-the-north/README.md b/samples/community/hack-the-north/README.md new file mode 100644 index 00000000..b25f7a8d --- /dev/null +++ b/samples/community/hack-the-north/README.md @@ -0,0 +1,3 @@ +# Hack the North Hackathon Submission + +In construction πŸ—οΈ \ No newline at end of file