diff --git a/Development.md b/Development.md
index 8d6e35751879..06bf415a7921 100644
--- a/Development.md
+++ b/Development.md
@@ -100,7 +100,7 @@ poetry run pytest ./tests/unit/test_*.py
To reduce build time (e.g., if no changes were made to the client-runtime component), you can use an existing Docker container image by
setting the SANDBOX_RUNTIME_CONTAINER_IMAGE environment variable to the desired Docker image.
-Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.24-nikolaik`
+Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.25-nikolaik`
## Develop inside Docker container
diff --git a/README.md b/README.md
index 22caad34c99f..51cf52943e94 100644
--- a/README.md
+++ b/README.md
@@ -43,17 +43,17 @@ See the [Running OpenHands](https://docs.all-hands.dev/modules/usage/installatio
system requirements and more information.
```bash
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik
+docker pull docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik
docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e LOG_ALL_EVENTS=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ~/.openhands-state:/.openhands-state \
-p 3000:3000 \
--add-host host.docker.internal:host-gateway \
--name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.24
+ docker.all-hands.dev/all-hands-ai/openhands:0.25
```
You'll find OpenHands running at [http://localhost:3000](http://localhost:3000)!
diff --git a/containers/dev/compose.yml b/containers/dev/compose.yml
index 50c8ed04563f..34245f04efaf 100644
--- a/containers/dev/compose.yml
+++ b/containers/dev/compose.yml
@@ -11,7 +11,7 @@ services:
- BACKEND_HOST=${BACKEND_HOST:-"0.0.0.0"}
- SANDBOX_API_HOSTNAME=host.docker.internal
#
- - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.24-nikolaik}
+ - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.25-nikolaik}
- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234}
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
ports:
diff --git a/docker-compose.yml b/docker-compose.yml
index 4353b7b6bb5b..6407f78b4030 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -7,7 +7,7 @@ services:
image: openhands:latest
container_name: openhands-app-${DATE:-}
environment:
- - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik}
+ - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik}
#- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of openhands-state for this user
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
ports:
diff --git a/docs/DOC_STYLE_GUIDE.md b/docs/DOC_STYLE_GUIDE.md
index a55af799b112..93b916b0e85a 100644
--- a/docs/DOC_STYLE_GUIDE.md
+++ b/docs/DOC_STYLE_GUIDE.md
@@ -46,3 +46,11 @@ docker run -it \
-e THAT=that
...
```
+
+### Referring to UI Elements
+
+When referencing UI elements, wrap them in backticks.
+
+Example:
+1. Toggle the `Advanced` option.
+2. Enter your model in the `Custom Model` text box.
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
index 6a666e91f8d3..c00beec86d72 100644
--- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
+++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
@@ -52,7 +52,7 @@ LLM_API_KEY="sk_test_12345"
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -61,7 +61,7 @@ docker run -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.cli
```
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
index a72cd57f0cc1..1151e1e60df8 100644
--- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
+++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
@@ -46,7 +46,7 @@ LLM_API_KEY="sk_test_12345"
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -56,6 +56,6 @@ docker run -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.main -t "write a bash script that prints hi" --no-auto-continue
```
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx
index 6a1789214923..79a9bf0acdb7 100644
--- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx
+++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx
@@ -13,16 +13,16 @@
La façon la plus simple d'exécuter OpenHands est avec Docker.
```bash
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik
+docker pull docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik
docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e LOG_ALL_EVENTS=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-p 3000:3000 \
--add-host host.docker.internal:host-gateway \
--name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.24
+ docker.all-hands.dev/all-hands-ai/openhands:0.25
```
Vous pouvez également exécuter OpenHands en mode [headless scriptable](https://docs.all-hands.dev/modules/usage/how-to/headless-mode), en tant que [CLI interactive](https://docs.all-hands.dev/modules/usage/how-to/cli-mode), ou en utilisant l'[Action GitHub OpenHands](https://docs.all-hands.dev/modules/usage/how-to/github-action).
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md
index 865489d34841..d256032b4c4c 100644
--- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md
+++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md
@@ -13,7 +13,7 @@ C'est le Runtime par défaut qui est utilisé lorsque vous démarrez OpenHands.
```
docker run # ...
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-v /var/run/docker.sock:/var/run/docker.sock \
# ...
```
diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
index 57b95b719570..4aafa581294b 100644
--- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
+++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md
@@ -50,7 +50,7 @@ LLM_API_KEY="sk_test_12345"
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -59,7 +59,7 @@ docker run -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.cli
```
diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
index 44a4b5bc6f63..a164f3a9ba8b 100644
--- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
+++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md
@@ -47,7 +47,7 @@ LLM_API_KEY="sk_test_12345"
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -57,6 +57,6 @@ docker run -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.main -t "write a bash script that prints hi" --no-auto-continue
```
diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx
index 2d20773af4bc..4988d8d4c7da 100644
--- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx
+++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx
@@ -11,16 +11,16 @@
在 Docker 中运行 OpenHands 是最简单的方式。
```bash
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik
+docker pull docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik
docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e LOG_ALL_EVENTS=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-p 3000:3000 \
--add-host host.docker.internal:host-gateway \
--name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.24
+ docker.all-hands.dev/all-hands-ai/openhands:0.25
```
你也可以在可脚本化的[无头模式](https://docs.all-hands.dev/modules/usage/how-to/headless-mode)下运行 OpenHands,作为[交互式 CLI](https://docs.all-hands.dev/modules/usage/how-to/cli-mode),或使用 [OpenHands GitHub Action](https://docs.all-hands.dev/modules/usage/how-to/github-action)。
diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md
index 5786ce571c81..e2d4bde47a2e 100644
--- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md
+++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md
@@ -11,7 +11,7 @@
```
docker run # ...
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-v /var/run/docker.sock:/var/run/docker.sock \
# ...
```
diff --git a/docs/modules/usage/how-to/cli-mode.md b/docs/modules/usage/how-to/cli-mode.md
index 612f1590eac9..630f63b9697a 100644
--- a/docs/modules/usage/how-to/cli-mode.md
+++ b/docs/modules/usage/how-to/cli-mode.md
@@ -35,7 +35,7 @@ To run OpenHands in CLI mode with Docker:
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -45,7 +45,7 @@ docker run -it \
-v ~/.openhands-state:/.openhands-state \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.cli
```
diff --git a/docs/modules/usage/how-to/gui-mode.md b/docs/modules/usage/how-to/gui-mode.md
index 483f8869e9eb..200e4ce3e0dc 100644
--- a/docs/modules/usage/how-to/gui-mode.md
+++ b/docs/modules/usage/how-to/gui-mode.md
@@ -1,9 +1,6 @@
# GUI Mode
-## Introduction
-
-OpenHands provides a user-friendly Graphical User Interface (GUI) mode for interacting with the AI assistant.
-This mode offers an intuitive way to set up the environment, manage settings, and communicate with the AI.
+OpenHands provides a Graphical User Interface (GUI) mode for interacting with the AI assistant.
## Installation and Setup
@@ -14,104 +11,95 @@ This mode offers an intuitive way to set up the environment, manage settings, an
### Initial Setup
-1. Upon first launch, you'll see a settings modal.
-2. Select an `LLM Provider` and `LLM Model` from the dropdown menus.
+1. Upon first launch, you'll see a settings page.
+2. Select an `LLM Provider` and `LLM Model` from the dropdown menus. If the required model does not exist in the list,
+ toggle `Advanced` options and enter it with the correct prefix in the `Custom Model` text box.
3. Enter the corresponding `API Key` for your chosen provider.
-4. Click "Save" to apply the settings.
+4. Click `Save Changes` to apply the settings.
### GitHub Token Setup
OpenHands automatically exports a `GITHUB_TOKEN` to the shell environment if it is available. This can happen in two ways:
-- **Locally (OSS)**: The user directly inputs their GitHub token.
-- **Online (SaaS)**: The token is obtained through GitHub OAuth authentication.
-
-#### Setting Up a Local GitHub Token
-
-1. **Generate a Personal Access Token (PAT)**:
- - Go to GitHub Settings > Developer Settings > Personal Access Tokens > Tokens (classic).
- - Click "Generate new token (classic)".
+- **Local Installation**: The user directly inputs their GitHub token.
+
+ Setting Up a GitHub Token
+ 1. **Generate a Personal Access Token (PAT)**:
+ - On GitHub, go to Settings > Developer Settings > Personal Access Tokens > Tokens (classic).
+ - Click `Generate new token (classic)`.
- Required scopes:
- `repo` (Full control of private repositories)
- - `workflow` (Update GitHub Action workflows)
- - `read:org` (Read organization data)
+ 2. **Enter Token in OpenHands**:
+ - Click the Settings button (gear icon).
+ - Navigate to the `GitHub Settings` section.
+ - Paste your token in the `GitHub Token` field.
+ - Click `Save Changes` to apply the changes.
+
-2. **Enter Token in OpenHands**:
- - Click the Settings button (gear icon) in the top right.
- - Navigate to the "GitHub" section.
- - Paste your token in the "GitHub Token" field.
- - Click "Save" to apply the changes.
+
+ Organizational Token Policies
-#### Organizational Token Policies
+ If you're working with organizational repositories, additional setup may be required:
-If you're working with organizational repositories, additional setup may be required:
-
-1. **Check Organization Requirements**:
+ 1. **Check Organization Requirements**:
- Organization admins may enforce specific token policies.
- Some organizations require tokens to be created with SSO enabled.
- Review your organization's [token policy settings](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/setting-a-personal-access-token-policy-for-your-organization).
-
-2. **Verify Organization Access**:
+ 2. **Verify Organization Access**:
- Go to your token settings on GitHub.
- - Look for the organization under "Organization access".
- - If required, click "Enable SSO" next to your organization.
+ - Look for the organization under `Organization access`.
+ - If required, click `Enable SSO` next to your organization.
- Complete the SSO authorization process.
+
+
+
+ Troubleshooting
+
+ Common issues and solutions:
+
+ - **Token Not Recognized**:
+ - Ensure the token is properly saved in settings.
+ - Check that the token hasn't expired.
+ - Verify the token has the required scopes.
+ - Try regenerating the token.
+
+ - **Organization Access Denied**:
+ - Check if SSO is required but not enabled.
+ - Verify organization membership.
+ - Contact organization admin if token policies are blocking access.
-#### OAuth Authentication (Online Mode)
+ - **Verifying Token Works**:
+ - The app will show a green checkmark if the token is valid.
+ - Try accessing a repository to confirm permissions.
+ - Check the browser console for any error messages.
+
-When using OpenHands in online mode, the GitHub OAuth flow:
+- **OpenHands Cloud**: The token is obtained through GitHub OAuth authentication.
-1. Requests the following permissions:
+
+ OAuth Authentication
+
+ When using OpenHands Cloud, the GitHub OAuth flow requests the following permissions:
- Repository access (read/write)
- Workflow management
- Organization read access
-2. Authentication steps:
- - Click "Sign in with GitHub" when prompted.
+ To authenticate OpenHands:
+ - Click `Sign in with GitHub` when prompted.
- Review the requested permissions.
- Authorize OpenHands to access your GitHub account.
- If using an organization, authorize organization access if prompted.
-
-#### Troubleshooting
-
-Common issues and solutions:
-
-- **Token Not Recognized**:
- - Ensure the token is properly saved in settings.
- - Check that the token hasn't expired.
- - Verify the token has the required scopes.
- - Try regenerating the token.
-
-- **Organization Access Denied**:
- - Check if SSO is required but not enabled.
- - Verify organization membership.
- - Contact organization admin if token policies are blocking access.
-
-- **Verifying Token Works**:
- - The app will show a green checkmark if the token is valid.
- - Try accessing a repository to confirm permissions.
- - Check the browser console for any error messages.
- - Use the "Test Connection" button in settings if available.
+
### Advanced Settings
-1. Toggle `Advanced Options` to access additional settings.
+1. Inside the Settings page, toggle `Advanced` options to access additional settings.
2. Use the `Custom Model` text box to manually enter a model if it's not in the list.
3. Specify a `Base URL` if required by your LLM provider.
-### Main Interface
-
-The main interface consists of several key components:
-
-- **Chat Window**: The central area where you can view the conversation history with the AI assistant.
-- **Input Box**: Located at the bottom of the screen, use this to type your messages or commands to the AI.
-- **Send Button**: Click this to send your message to the AI.
-- **Settings Button**: A gear icon that opens the settings modal, allowing you to adjust your configuration at any time.
-- **Workspace Panel**: Displays the files and folders in your workspace, allowing you to navigate and view files, or the agent's past commands or web browsing history.
-
### Interacting with the AI
-1. Type your question, request, or task description in the input box.
+1. Type your prompt in the input box.
2. Click the send button or press Enter to submit your message.
3. The AI will process your input and provide a response in the chat window.
4. You can continue the conversation by asking follow-up questions or providing additional information.
diff --git a/docs/modules/usage/how-to/headless-mode.md b/docs/modules/usage/how-to/headless-mode.md
index b751dc3000d1..e68b1e494934 100644
--- a/docs/modules/usage/how-to/headless-mode.md
+++ b/docs/modules/usage/how-to/headless-mode.md
@@ -32,7 +32,7 @@ To run OpenHands in Headless mode with Docker:
```bash
docker run -it \
--pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e SANDBOX_USER_ID=$(id -u) \
-e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
-e LLM_API_KEY=$LLM_API_KEY \
@@ -43,7 +43,7 @@ docker run -it \
-v ~/.openhands-state:/.openhands-state \
--add-host host.docker.internal:host-gateway \
--name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.24 \
+ docker.all-hands.dev/all-hands-ai/openhands:0.25 \
python -m openhands.core.main -t "write a bash script that prints hi"
```
diff --git a/docs/modules/usage/installation.mdx b/docs/modules/usage/installation.mdx
index 72d5300f3f03..610be444fef4 100644
--- a/docs/modules/usage/installation.mdx
+++ b/docs/modules/usage/installation.mdx
@@ -12,7 +12,8 @@ A system with a modern processor and a minimum of **4GB RAM** is recommended to
MacOS
- ### Docker Desktop
+
+ **Docker Desktop**
1. [Install Docker Desktop on Mac](https://docs.docker.com/desktop/setup/install/mac-install).
2. Open Docker Desktop, go to `Settings > Advanced` and ensure `Allow the default Docker socket to be used` is enabled.
@@ -25,7 +26,7 @@ A system with a modern processor and a minimum of **4GB RAM** is recommended to
Tested with Ubuntu 22.04.
:::
- ### Docker Desktop
+ **Docker Desktop**
1. [Install Docker Desktop on Linux](https://docs.docker.com/desktop/setup/install/linux/).
@@ -33,12 +34,13 @@ A system with a modern processor and a minimum of **4GB RAM** is recommended to
Windows
- ### WSL
+
+ **WSL**
1. [Install WSL](https://learn.microsoft.com/en-us/windows/wsl/install).
2. Run `wsl --version` in powershell and confirm `Default Version: 2`.
- ### Docker Desktop
+ **Docker Desktop**
1. [Install Docker Desktop on Windows](https://docs.docker.com/desktop/setup/install/windows-install).
2. Open Docker Desktop, go to `Settings` and confirm the following:
@@ -56,17 +58,17 @@ A system with a modern processor and a minimum of **4GB RAM** is recommended to
The easiest way to run OpenHands is in Docker.
```bash
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik
+docker pull docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik
docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-e LOG_ALL_EVENTS=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ~/.openhands-state:/.openhands-state \
-p 3000:3000 \
--add-host host.docker.internal:host-gateway \
--name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.24
+ docker.all-hands.dev/all-hands-ai/openhands:0.25
```
You'll find OpenHands running at http://localhost:3000!
@@ -78,24 +80,22 @@ or run it on tagged issues with [a github action](https://docs.all-hands.dev/mod
## Setup
-Upon launching OpenHands, you'll see a settings modal. You **must** select an `LLM Provider` and `LLM Model` and enter a corresponding `API Key`.
+Upon launching OpenHands, you'll see a Settings page. You **must** select an `LLM Provider` and `LLM Model` and enter a corresponding `API Key`.
These can be changed at any time by selecting the `Settings` button (gear icon) in the UI.
-If the required `LLM Model` does not exist in the list, you can toggle `Advanced Options` and manually enter it with the correct prefix
+If the required model does not exist in the list, you can toggle `Advanced` options and manually enter it with the correct prefix
in the `Custom Model` text box.
-The `Advanced Options` also allow you to specify a `Base URL` if required.
+The `Advanced` options also allow you to specify a `Base URL` if required.
-
-

-

-
+Now you're ready to [get started with OpenHands](./getting-started).
## Versions
-The command above pulls the most recent stable release of OpenHands. You have other options as well:
-- For a specific release, use `docker.all-hands.dev/all-hands-ai/openhands:$VERSION`, replacing $VERSION with the version number.
-- We use semver, and release major, minor, and patch tags. So `0.9` will automatically point to the latest `0.9.x` release, and `0` will point to the latest `0.x.x` release.
-- For the most up-to-date development version, you can use `docker.all-hands.dev/all-hands-ai/openhands:main`. This version is unstable and is recommended for testing or development purposes only.
+The [docker command above](./installation#start-the-app) pulls the most recent stable release of OpenHands. You have other options as well:
+- For a specific release, replace $VERSION in `openhands:$VERSION` and `runtime:$VERSION` with the version number.
+We use SemVer, so `0.9` will automatically point to the latest `0.9.x` release, and `0` will point to the latest `0.x.x` release.
+- For the most up-to-date development version, replace $VERSION in `openhands:$VERSION` and `runtime:$VERSION` with `main`.
+This version is unstable and is recommended for testing or development purposes only.
You can choose the tag that best suits your needs based on stability requirements and desired features.
diff --git a/docs/modules/usage/llms/azure-llms.md b/docs/modules/usage/llms/azure-llms.md
index 7046fe7bf536..84f16627ab31 100644
--- a/docs/modules/usage/llms/azure-llms.md
+++ b/docs/modules/usage/llms/azure-llms.md
@@ -25,7 +25,7 @@ You will need your ChatGPT deployment name which can be found on the deployments
<deployment-name> below.
:::
-1. Enable `Advanced Options`
+1. Enable `Advanced` options
2. Set the following:
- `Custom Model` to azure/<deployment-name>
- `Base URL` to your Azure API Base URL (e.g. `https://example-endpoint.openai.azure.com`)
diff --git a/docs/modules/usage/llms/google-llms.md b/docs/modules/usage/llms/google-llms.md
index d89ba389f057..74e9015ffb0a 100644
--- a/docs/modules/usage/llms/google-llms.md
+++ b/docs/modules/usage/llms/google-llms.md
@@ -10,7 +10,7 @@ OpenHands uses LiteLLM to make calls to Google's chat models. You can find their
When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings:
- `LLM Provider` to `Gemini`
- `LLM Model` to the model you will be using.
-If the model is not in the list, toggle `Advanced Options`, and enter it in `Custom Model` (e.g. gemini/<model-name> like `gemini/gemini-1.5-pro`).
+If the model is not in the list, toggle `Advanced` options, and enter it in `Custom Model` (e.g. gemini/<model-name> like `gemini/gemini-1.5-pro`).
- `API Key` to your Gemini API key
## VertexAI - Google Cloud Platform Configs
@@ -27,4 +27,4 @@ VERTEXAI_LOCATION=""
Then set the following in the OpenHands UI through the Settings:
- `LLM Provider` to `VertexAI`
- `LLM Model` to the model you will be using.
-If the model is not in the list, toggle `Advanced Options`, and enter it in `Custom Model` (e.g. vertex_ai/<model-name>).
+If the model is not in the list, toggle `Advanced` options, and enter it in `Custom Model` (e.g. vertex_ai/<model-name>).
diff --git a/docs/modules/usage/llms/groq.md b/docs/modules/usage/llms/groq.md
index d484d5e3a4e1..0de104cf1400 100644
--- a/docs/modules/usage/llms/groq.md
+++ b/docs/modules/usage/llms/groq.md
@@ -8,7 +8,7 @@ When running OpenHands, you'll need to set the following in the OpenHands UI thr
- `LLM Provider` to `Groq`
- `LLM Model` to the model you will be using. [Visit here to see the list of
models that Groq hosts](https://console.groq.com/docs/models). If the model is not in the list, toggle
-`Advanced Options`, and enter it in `Custom Model` (e.g. groq/<model-name> like `groq/llama3-70b-8192`).
+`Advanced` options, and enter it in `Custom Model` (e.g. groq/<model-name> like `groq/llama3-70b-8192`).
- `API key` to your Groq API key. To find or create your Groq API Key, [see here](https://console.groq.com/keys).
@@ -17,7 +17,7 @@ models that Groq hosts](https://console.groq.com/docs/models). If the model is n
The Groq endpoint for chat completion is [mostly OpenAI-compatible](https://console.groq.com/docs/openai). Therefore, you can access Groq models as you
would access any OpenAI-compatible endpoint. In the OpenHands UI through the Settings:
-1. Enable `Advanced Options`
+1. Enable `Advanced` options
2. Set the following:
- `Custom Model` to the prefix `openai/` + the model you will be using (e.g. `openai/llama3-70b-8192`)
- `Base URL` to `https://api.groq.com/openai/v1`
diff --git a/docs/modules/usage/llms/litellm-proxy.md b/docs/modules/usage/llms/litellm-proxy.md
index 9178bc5c33ea..21413e0ef191 100644
--- a/docs/modules/usage/llms/litellm-proxy.md
+++ b/docs/modules/usage/llms/litellm-proxy.md
@@ -8,7 +8,7 @@ To use LiteLLM proxy with OpenHands, you need to:
1. Set up a LiteLLM proxy server (see [LiteLLM documentation](https://docs.litellm.ai/docs/proxy/quick_start))
2. When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings:
- * Enable `Advanced Options`
+ * Enable `Advanced` options
* `Custom Model` to the prefix `litellm_proxy/` + the model you will be using (e.g. `litellm_proxy/anthropic.claude-3-5-sonnet-20241022-v2:0`)
* `Base URL` to your LiteLLM proxy URL (e.g. `https://your-litellm-proxy.com`)
* `API Key` to your LiteLLM proxy API key
diff --git a/docs/modules/usage/llms/llms.md b/docs/modules/usage/llms/llms.md
index f4fa118dd02e..c2b08d013491 100644
--- a/docs/modules/usage/llms/llms.md
+++ b/docs/modules/usage/llms/llms.md
@@ -38,7 +38,7 @@ The following can be set in the OpenHands UI through the Settings:
- `LLM Provider`
- `LLM Model`
- `API Key`
-- `Base URL` (through `Advanced Settings`)
+- `Base URL` (through `Advanced` settings)
There are some settings that may be necessary for some LLMs/providers that cannot be set through the UI. Instead, these
can be set through environment variables passed to the [docker run command](/modules/usage/installation#start-the-app)
diff --git a/docs/modules/usage/llms/openai-llms.md b/docs/modules/usage/llms/openai-llms.md
index 9157c7cac8bb..d0358989691a 100644
--- a/docs/modules/usage/llms/openai-llms.md
+++ b/docs/modules/usage/llms/openai-llms.md
@@ -8,7 +8,7 @@ When running OpenHands, you'll need to set the following in the OpenHands UI thr
* `LLM Provider` to `OpenAI`
* `LLM Model` to the model you will be using.
[Visit here to see a full list of OpenAI models that LiteLLM supports.](https://docs.litellm.ai/docs/providers/openai#openai-chat-completion-models)
-If the model is not in the list, toggle `Advanced Options`, and enter it in `Custom Model` (e.g. openai/<model-name> like `openai/gpt-4o`).
+If the model is not in the list, toggle `Advanced` options, and enter it in `Custom Model` (e.g. openai/<model-name> like `openai/gpt-4o`).
* `API Key` to your OpenAI API key. To find or create your OpenAI Project API Key, [see here](https://platform.openai.com/api-keys).
## Using OpenAI-Compatible Endpoints
@@ -18,7 +18,7 @@ Just as for OpenAI Chat completions, we use LiteLLM for OpenAI-compatible endpoi
## Using an OpenAI Proxy
If you're using an OpenAI proxy, in the OpenHands UI through the Settings:
-1. Enable `Advanced Options`
+1. Enable `Advanced` options
2. Set the following:
- `Custom Model` to openai/<model-name> (e.g. `openai/gpt-4o` or openai/<proxy-prefix>/<model-name>)
- `Base URL` to the URL of your OpenAI proxy
diff --git a/docs/modules/usage/llms/openrouter.md b/docs/modules/usage/llms/openrouter.md
index 247d0a0558f1..2b5204d26c82 100644
--- a/docs/modules/usage/llms/openrouter.md
+++ b/docs/modules/usage/llms/openrouter.md
@@ -8,5 +8,5 @@ When running OpenHands, you'll need to set the following in the OpenHands UI thr
* `LLM Provider` to `OpenRouter`
* `LLM Model` to the model you will be using.
[Visit here to see a full list of OpenRouter models](https://openrouter.ai/models).
-If the model is not in the list, toggle `Advanced Options`, and enter it in `Custom Model` (e.g. openrouter/<model-name> like `openrouter/anthropic/claude-3.5-sonnet`).
+If the model is not in the list, toggle `Advanced` options, and enter it in `Custom Model` (e.g. openrouter/<model-name> like `openrouter/anthropic/claude-3.5-sonnet`).
* `API Key` to your OpenRouter API key.
diff --git a/docs/modules/usage/runtimes.md b/docs/modules/usage/runtimes.md
index 740a53b00482..1fb2d0f4236d 100644
--- a/docs/modules/usage/runtimes.md
+++ b/docs/modules/usage/runtimes.md
@@ -16,7 +16,7 @@ some flags being passed to `docker run` that make this possible:
```
docker run # ...
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.25-nikolaik \
-v /var/run/docker.sock:/var/run/docker.sock \
# ...
```
diff --git a/docs/sidebars.ts b/docs/sidebars.ts
index da416ac30b91..f71e36a0b571 100644
--- a/docs/sidebars.ts
+++ b/docs/sidebars.ts
@@ -66,7 +66,7 @@ const sidebars: SidebarsConfig = {
},
{
type: 'doc',
- label: 'Github Actions',
+ label: 'Github Action',
id: 'usage/how-to/github-action',
},
{
diff --git a/docs/src/pages/index.tsx b/docs/src/pages/index.tsx
index a2df79a259a5..6f20f1eb776d 100644
--- a/docs/src/pages/index.tsx
+++ b/docs/src/pages/index.tsx
@@ -23,6 +23,17 @@ export default function Home(): JSX.Element {
})}
>
+
+
+
+          Most Popular Links
+
+
);
}
diff --git a/docs/static/img/settings-advanced.png b/docs/static/img/settings-advanced.png
deleted file mode 100644
index 43a9cf05ab83..000000000000
Binary files a/docs/static/img/settings-advanced.png and /dev/null differ
diff --git a/docs/static/img/settings-screenshot.png b/docs/static/img/settings-screenshot.png
deleted file mode 100644
index 987dd8c25570..000000000000
Binary files a/docs/static/img/settings-screenshot.png and /dev/null differ
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index f03e27c271f6..12eb28118046 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "openhands-frontend",
- "version": "0.24.0",
+ "version": "0.25.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "openhands-frontend",
- "version": "0.24.0",
+ "version": "0.25.0",
"dependencies": {
"@heroui/react": "2.6.14",
"@monaco-editor/react": "^4.7.0-rc.0",
diff --git a/frontend/package.json b/frontend/package.json
index b840a3e51418..4fc89d93629d 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -1,6 +1,6 @@
{
"name": "openhands-frontend",
- "version": "0.24.0",
+ "version": "0.25.0",
"private": true,
"type": "module",
"engines": {
diff --git a/openhands/agenthub/dummy_agent/agent.py b/openhands/agenthub/dummy_agent/agent.py
index f7a654bf75b4..b420a3d5d8ae 100644
--- a/openhands/agenthub/dummy_agent/agent.py
+++ b/openhands/agenthub/dummy_agent/agent.py
@@ -22,7 +22,6 @@
CmdOutputObservation,
FileReadObservation,
FileWriteObservation,
- NullObservation,
Observation,
)
from openhands.events.serialization.event import event_to_dict
@@ -109,7 +108,7 @@ def __init__(self, llm: LLM, config: AgentConfig):
},
{
'action': AgentRejectAction(),
- 'observations': [NullObservation('')],
+ 'observations': [AgentStateChangedObservation('', AgentState.REJECTED)],
},
{
'action': AgentFinishAction(
diff --git a/openhands/agenthub/micro/agent.py b/openhands/agenthub/micro/agent.py
index 2c22e3840a51..37de035c461d 100644
--- a/openhands/agenthub/micro/agent.py
+++ b/openhands/agenthub/micro/agent.py
@@ -6,11 +6,11 @@
from openhands.controller.state.state import State
from openhands.core.config import AgentConfig
from openhands.core.message import ImageContent, Message, TextContent
-from openhands.core.utils import json
from openhands.events.action import Action
from openhands.events.event import Event
from openhands.events.serialization.action import action_from_dict
from openhands.events.serialization.event import event_to_memory
+from openhands.io import json
from openhands.llm.llm import LLM
diff --git a/openhands/core/cli.py b/openhands/core/cli.py
index 1e31537155ac..05a390d8b815 100644
--- a/openhands/core/cli.py
+++ b/openhands/core/cli.py
@@ -29,8 +29,8 @@
AgentStateChangedObservation,
CmdOutputObservation,
FileEditObservation,
- NullObservation,
)
+from openhands.io import read_input, read_task
def display_message(message: str):
@@ -83,21 +83,6 @@ def display_event(event: Event, config: AppConfig):
display_confirmation(event.confirmation_state)
-def read_input(config: AppConfig) -> str:
- """Read input from user based on config settings."""
- if config.cli_multiline_input:
- print('Enter your message (enter "/exit" on a new line to finish):')
- lines = []
- while True:
- line = input('>> ').rstrip()
- if line == '/exit': # finish input
- break
- lines.append(line)
- return '\n'.join(lines)
- else:
- return input('>> ').rstrip()
-
-
async def main(loop: asyncio.AbstractEventLoop):
"""Runs the agent in CLI mode."""
@@ -105,7 +90,14 @@ async def main(loop: asyncio.AbstractEventLoop):
logger.setLevel(logging.WARNING)
- config = setup_config_from_args(args)
+ # Load config from toml and override with command line arguments
+ config: AppConfig = setup_config_from_args(args)
+
+ # Read task from file, CLI args, or stdin
+ task_str = read_task(args, config.cli_multiline_input)
+
+ # If we have a task, create initial user action
+ initial_user_action = MessageAction(content=task_str) if task_str else None
sid = str(uuid4())
@@ -118,7 +110,9 @@ async def main(loop: asyncio.AbstractEventLoop):
async def prompt_for_next_task():
# Run input() in a thread pool to avoid blocking the event loop
- next_message = await loop.run_in_executor(None, read_input, config)
+ next_message = await loop.run_in_executor(
+ None, read_input, config.cli_multiline_input
+ )
if not next_message.strip():
await prompt_for_next_task()
if next_message == 'exit':
@@ -143,19 +137,18 @@ async def on_event_async(event: Event):
AgentState.FINISHED,
]:
await prompt_for_next_task()
- if (
- isinstance(event, NullObservation)
- and controller.state.agent_state == AgentState.AWAITING_USER_CONFIRMATION
- ):
- user_confirmed = await prompt_for_user_confirmation()
- if user_confirmed:
- event_stream.add_event(
- ChangeAgentStateAction(AgentState.USER_CONFIRMED), EventSource.USER
- )
- else:
- event_stream.add_event(
- ChangeAgentStateAction(AgentState.USER_REJECTED), EventSource.USER
- )
+ if event.agent_state == AgentState.AWAITING_USER_CONFIRMATION:
+ user_confirmed = await prompt_for_user_confirmation()
+ if user_confirmed:
+ event_stream.add_event(
+ ChangeAgentStateAction(AgentState.USER_CONFIRMED),
+ EventSource.USER,
+ )
+ else:
+ event_stream.add_event(
+ ChangeAgentStateAction(AgentState.USER_REJECTED),
+ EventSource.USER,
+ )
def on_event(event: Event) -> None:
loop.create_task(on_event_async(event))
@@ -164,7 +157,12 @@ def on_event(event: Event) -> None:
await runtime.connect()
- asyncio.create_task(prompt_for_next_task())
+ if initial_user_action:
+ # If there's an initial user action, enqueue it and do not prompt again
+ event_stream.add_event(initial_user_action, EventSource.USER)
+ else:
+ # Otherwise prompt for the user's first message right away
+ asyncio.create_task(prompt_for_next_task())
await run_agent_until_done(
controller, runtime, [AgentState.STOPPED, AgentState.ERROR]
diff --git a/openhands/core/config/llm_config.py b/openhands/core/config/llm_config.py
index cee22766df14..5497d7125823 100644
--- a/openhands/core/config/llm_config.py
+++ b/openhands/core/config/llm_config.py
@@ -102,3 +102,9 @@ def model_post_init(self, __context: Any):
os.environ['OR_SITE_URL'] = self.openrouter_site_url
if self.openrouter_app_name:
os.environ['OR_APP_NAME'] = self.openrouter_app_name
+
+ # Assign an API version for Azure models
+ # While it doesn't seem required, the format supported by the API without version seems old and will likely break.
+ # Azure issue: https://github.com/All-Hands-AI/OpenHands/issues/6777
+ if self.model.startswith('azure') and self.api_version is None:
+ self.api_version = '2024-08-01-preview'
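As a quick illustration of this post-init defaulting, here is a minimal sketch using a hypothetical stand-in class (not the real `LLMConfig`), showing that only Azure models receive the fallback `api_version` and an explicit value is never overridden:

```python
from typing import Any

from pydantic import BaseModel


class AzureDefaultingConfig(BaseModel):
    """Hypothetical stand-in for LLMConfig, reduced to the two fields involved."""

    model: str = 'gpt-4o'
    api_version: str | None = None

    def model_post_init(self, __context: Any) -> None:
        # Same rule as the change above: only Azure models get a default
        # api_version, and an explicitly supplied value is left untouched.
        if self.model.startswith('azure') and self.api_version is None:
            self.api_version = '2024-08-01-preview'


print(AzureDefaultingConfig(model='azure/gpt-4o').api_version)  # 2024-08-01-preview
print(AzureDefaultingConfig(model='gpt-4o').api_version)        # None
```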
diff --git a/openhands/core/main.py b/openhands/core/main.py
index 2652931cce7a..12e0c4e7876c 100644
--- a/openhands/core/main.py
+++ b/openhands/core/main.py
@@ -1,7 +1,6 @@
import asyncio
import json
import os
-import sys
from pathlib import Path
from typing import Callable, Protocol
@@ -29,6 +28,7 @@
from openhands.events.observation import AgentStateChangedObservation
from openhands.events.serialization import event_from_dict
from openhands.events.serialization.event import event_to_trajectory
+from openhands.io import read_input, read_task
from openhands.runtime.base import Runtime
@@ -41,32 +41,6 @@ def __call__(
) -> str: ...
-def read_task_from_file(file_path: str) -> str:
- """Read task from the specified file."""
- with open(file_path, 'r', encoding='utf-8') as file:
- return file.read()
-
-
-def read_task_from_stdin() -> str:
- """Read task from stdin."""
- return sys.stdin.read()
-
-
-def read_input(config: AppConfig) -> str:
- """Read input from user based on config settings."""
- if config.cli_multiline_input:
- print('Enter your message (enter "/exit" on a new line to finish):')
- lines = []
- while True:
- line = input('>> ').rstrip()
- if line == '/exit': # finish input
- break
- lines.append(line)
- return '\n'.join(lines)
- else:
- return input('>> ').rstrip()
-
-
async def run_controller(
config: AppConfig,
initial_user_action: Action,
@@ -139,7 +113,6 @@ async def run_controller(
assert isinstance(
initial_user_action, Action
), f'initial user actions must be an Action, got {type(initial_user_action)}'
- # Logging
logger.debug(
f'Agent Controller Initialized: Running agent {agent.name}, model '
f'{agent.llm.config.model}, with actions: {initial_user_action}'
@@ -167,7 +140,7 @@ def on_event(event: Event):
if exit_on_message:
message = '/exit'
elif fake_user_response_fn is None:
- message = read_input(config)
+ message = read_input(config.cli_multiline_input)
else:
message = fake_user_response_fn(controller.get_state())
action = MessageAction(content=message)
@@ -268,28 +241,23 @@ def load_replay_log(trajectory_path: str) -> tuple[list[Event] | None, Action]:
if __name__ == '__main__':
args = parse_arguments()
- config = setup_config_from_args(args)
+ config: AppConfig = setup_config_from_args(args)
- # Determine the task
- task_str = ''
- if args.file:
- task_str = read_task_from_file(args.file)
- elif args.task:
- task_str = args.task
- elif not sys.stdin.isatty():
- task_str = read_task_from_stdin()
+ # Read task from file, CLI args, or stdin
+ task_str = read_task(args, config.cli_multiline_input)
- initial_user_action: Action = NullAction()
if config.replay_trajectory_path:
if task_str:
raise ValueError(
'User-specified task is not supported under trajectory replay mode'
)
- elif task_str:
- initial_user_action = MessageAction(content=task_str)
- else:
+
+ if not task_str:
raise ValueError('No task provided. Please specify a task through -t, -f.')
+ # Create initial user action
+ initial_user_action: MessageAction = MessageAction(content=task_str)
+
# Set session name
session_name = args.name
sid = generate_sid(config, session_name)
diff --git a/openhands/core/utils/__init__.py b/openhands/core/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/openhands/events/stream.py b/openhands/events/stream.py
index 0fc547803f6d..938269822a7a 100644
--- a/openhands/events/stream.py
+++ b/openhands/events/stream.py
@@ -8,9 +8,9 @@
from typing import Callable, Iterable
from openhands.core.logger import openhands_logger as logger
-from openhands.core.utils import json
from openhands.events.event import Event, EventSource
from openhands.events.serialization.event import event_from_dict, event_to_dict
+from openhands.io import json
from openhands.storage import FileStore
from openhands.storage.locations import (
get_conversation_dir,
diff --git a/openhands/events/utils.py b/openhands/events/utils.py
index bf710edcd7bd..cfc2dd804c3a 100644
--- a/openhands/events/utils.py
+++ b/openhands/events/utils.py
@@ -10,7 +10,9 @@
def get_pairs_from_events(events: list[Event]) -> list[tuple[Action, Observation]]:
- """Return the history as a list of tuples (action, observation)."""
+ """Return the history as a list of tuples (action, observation).
+
+    This is a compatibility helper for evals that read and visualize old histories."""
tuples: list[tuple[Action, Observation]] = []
action_map: dict[int, Action] = {}
observation_map: dict[int, Observation] = {}
diff --git a/openhands/io/__init__.py b/openhands/io/__init__.py
new file mode 100644
index 000000000000..bf1a054356c1
--- /dev/null
+++ b/openhands/io/__init__.py
@@ -0,0 +1,10 @@
+from openhands.io.io import read_input, read_task, read_task_from_file
+from openhands.io.json import dumps, loads
+
+__all__ = [
+ 'read_input',
+ 'read_task_from_file',
+ 'read_task',
+ 'dumps',
+ 'loads',
+]
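A small usage sketch of the re-exported JSON helpers, assuming only what the `__all__` above and the `json.dumps(..., indent=2)` call later in this diff imply; call sites change nothing but the import path:

```python
# Previously imported from openhands.core.utils; now re-exported by openhands.io.
from openhands.io import dumps, loads

payload = dumps({'action': 'message', 'args': {'content': 'hi'}}, indent=2)
print(loads(payload)['args']['content'])  # hi
```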
diff --git a/openhands/io/io.py b/openhands/io/io.py
new file mode 100644
index 000000000000..2e42df912b77
--- /dev/null
+++ b/openhands/io/io.py
@@ -0,0 +1,40 @@
+import argparse
+import sys
+
+
+def read_input(cli_multiline_input: bool = False) -> str:
+ """Read input from user based on config settings."""
+ if cli_multiline_input:
+ print('Enter your message (enter "/exit" on a new line to finish):')
+ lines = []
+ while True:
+ line = input('>> ').rstrip()
+ if line == '/exit': # finish input
+ break
+ lines.append(line)
+ return '\n'.join(lines)
+ else:
+ return input('>> ').rstrip()
+
+
+def read_task_from_file(file_path: str) -> str:
+ """Read task from the specified file."""
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def read_task(args: argparse.Namespace, cli_multiline_input: bool) -> str:
+    """
+    Read the task from file, CLI args, or stdin.
+    """
+
+ # Determine the task
+ task_str = ''
+ if args.file:
+ task_str = read_task_from_file(args.file)
+ elif args.task:
+ task_str = args.task
+ elif not sys.stdin.isatty():
+ task_str = read_input(cli_multiline_input)
+
+ return task_str
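A hedged usage sketch of `read_task`; the `-t`/`-f` flags follow the error message in `openhands/core/main.py` below, and the parser here is an illustrative stand-in for the real argument parser:

```python
import argparse

from openhands.io import read_task

# Stand-in parser: the real one lives in the OpenHands config module and also defines -t/-f.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--task', default='')
parser.add_argument('-f', '--file', default='')
args = parser.parse_args(['-t', 'write a bash script that prints hi'])

# Precedence follows the helper above: file first, then the -t task, then stdin.
task_str = read_task(args, cli_multiline_input=False)
print(task_str)
```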
diff --git a/openhands/core/utils/json.py b/openhands/io/json.py
similarity index 100%
rename from openhands/core/utils/json.py
rename to openhands/io/json.py
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index a9071b43bed3..b40f11ca8396 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -172,7 +172,7 @@ def __init__(
)
def wrapper(*args, **kwargs):
"""Wrapper for the litellm completion function. Logs the input and output of the completion function."""
- from openhands.core.utils import json
+ from openhands.io import json
messages: list[dict[str, Any]] | dict[str, Any] = []
mock_function_calling = not self.is_function_calling_active()
@@ -369,7 +369,7 @@ def init_model_info(self):
# noinspection PyBroadException
except Exception:
pass
- from openhands.core.utils import json
+ from openhands.io import json
logger.debug(f'Model info: {json.dumps(self.model_info, indent=2)}')
diff --git a/openhands/memory/condenser/condenser.py b/openhands/memory/condenser/condenser.py
index 878c27aec140..411ed39386f6 100644
--- a/openhands/memory/condenser/condenser.py
+++ b/openhands/memory/condenser/condenser.py
@@ -161,6 +161,12 @@ def __init__(self) -> None:
@override
def condensed_history(self, state: State) -> list[Event]:
+ # The history should grow monotonically -- if it doesn't, something has
+ # truncated the history and we need to reset our tracking.
+ if len(state.history) < self._last_history_length:
+ self._condensation = []
+ self._last_history_length = 0
+
new_events = state.history[self._last_history_length :]
with self.metadata_batch(state):
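To show the truncation guard in isolation, here is a minimal sketch with the bookkeeping reduced to two attributes; the class is illustrative, not the real condenser:

```python
class HistoryTracker:
    """Illustrative stand-in: tracks how much of a growing history has been seen."""

    def __init__(self) -> None:
        self._condensation: list[str] = []
        self._last_history_length = 0

    def condensed_history(self, history: list[str]) -> list[str]:
        # If the history shrank, something truncated it -- drop the stale tracking.
        if len(history) < self._last_history_length:
            self._condensation = []
            self._last_history_length = 0

        new_events = history[self._last_history_length:]
        self._condensation.extend(new_events)
        self._last_history_length = len(history)
        return self._condensation


tracker = HistoryTracker()
tracker.condensed_history(['a', 'b', 'c'])
print(tracker.condensed_history(['a']))  # ['a'] -- tracking was reset instead of slicing past the end
```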
diff --git a/openhands/memory/condenser/impl/llm_summarizing_condenser.py b/openhands/memory/condenser/impl/llm_summarizing_condenser.py
index 276d1c1e8748..54068f04547c 100644
--- a/openhands/memory/condenser/impl/llm_summarizing_condenser.py
+++ b/openhands/memory/condenser/impl/llm_summarizing_condenser.py
@@ -57,22 +57,29 @@ def condense(self, events: list[Event]) -> list[Event]:
# Construct prompt for summarization
prompt = """You are maintaining state history for an LLM-based code agent. Track:
+USER_CONTEXT: (Preserve essential user requirements, problem descriptions, and clarifications in concise form)
+
STATE: {File paths, function signatures, data structures}
TESTS: {Failing cases, error messages, outputs}
CHANGES: {Code edits, variable updates}
DEPS: {Dependencies, imports, external calls}
INTENT: {Why changes were made, acceptance criteria}
-SKIP: {Git clones, build logs}
-SUMMARIZE: {File listings}
-MAX_LENGTH: Keep summaries under 1000 words
+PRIORITIZE:
+1. Capture key user requirements and constraints
+2. Maintain critical problem context
+3. Keep all sections concise
+
+SKIP: {Git clones, build logs, file listings}
Example history format:
+USER_CONTEXT: Fix FITS card float representation - "0.009125" becomes "0.009124999999999999" causing comment truncation. Use Python's str() when possible while maintaining FITS compliance.
+
STATE: mod_float() in card.py updated
TESTS: test_format() passed
CHANGES: str(val) replaces f"{val:.16G}"
DEPS: None modified
-INTENT: Fix float precision overflow"""
+INTENT: Fix precision while maintaining FITS compliance"""
prompt + '\n\n'
diff --git a/openhands/runtime/action_execution_server.py b/openhands/runtime/action_execution_server.py
index d9ee74c4293a..1a2d4f1d4a7b 100644
--- a/openhands/runtime/action_execution_server.py
+++ b/openhands/runtime/action_execution_server.py
@@ -8,7 +8,6 @@
import argparse
import asyncio
import base64
-import io
import mimetypes
import os
import shutil
@@ -21,12 +20,13 @@
from fastapi import Depends, FastAPI, HTTPException, Request, UploadFile
from fastapi.exceptions import RequestValidationError
-from fastapi.responses import JSONResponse, StreamingResponse
+from fastapi.responses import FileResponse, JSONResponse
from fastapi.security import APIKeyHeader
from openhands_aci.editor.editor import OHEditor
from openhands_aci.editor.exceptions import ToolError
from openhands_aci.editor.results import ToolResult
from pydantic import BaseModel
+from starlette.background import BackgroundTask
from starlette.exceptions import HTTPException as StarletteHTTPException
from uvicorn import run
@@ -631,7 +631,7 @@ async def upload_file(
raise HTTPException(status_code=500, detail=str(e))
@app.get('/download_files')
- async def download_file(path: str):
+ def download_file(path: str):
logger.debug('Downloading files')
try:
if not os.path.isabs(path):
@@ -642,7 +642,7 @@ async def download_file(path: str):
if not os.path.exists(path):
raise HTTPException(status_code=404, detail='File not found')
- with tempfile.TemporaryFile() as temp_zip:
+ with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_zip:
with ZipFile(temp_zip, 'w') as zipf:
for root, _, files in os.walk(path):
for file in files:
@@ -650,15 +650,11 @@ async def download_file(path: str):
zipf.write(
file_path, arcname=os.path.relpath(file_path, path)
)
- temp_zip.seek(0) # Rewind the file to the beginning after writing
- content = temp_zip.read()
- # Good for small to medium-sized files. For very large files, streaming directly from the
- # file chunks may be more memory-efficient.
- zip_stream = io.BytesIO(content)
- return StreamingResponse(
- content=zip_stream,
+ return FileResponse(
+ path=temp_zip.name,
media_type='application/zip',
- headers={'Content-Disposition': f'attachment; filename={path}.zip'},
+ filename=f'{os.path.basename(path)}.zip',
+ background=BackgroundTask(lambda: os.unlink(temp_zip.name)),
)
except Exception as e:
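The same FileResponse-plus-BackgroundTask pattern appears again in the `/zip-directory` route further below. A self-contained sketch (route name and app are illustrative): the temp file outlives the handler and is unlinked only after the response body has been sent.

```python
import os
import tempfile
from zipfile import ZipFile

from fastapi import FastAPI
from fastapi.responses import FileResponse
from starlette.background import BackgroundTask

app = FastAPI()


@app.get('/zip-example')
def zip_example(path: str = '.') -> FileResponse:
    # Write the archive to a named temp file that survives past this handler.
    with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_zip:
        with ZipFile(temp_zip, 'w') as zipf:
            for root, _, files in os.walk(path):
                for file in files:
                    file_path = os.path.join(root, file)
                    zipf.write(file_path, arcname=os.path.relpath(file_path, path))
    return FileResponse(
        path=temp_zip.name,
        media_type='application/zip',
        filename='archive.zip',
        # Runs after the response is sent, so the file is not deleted too early.
        background=BackgroundTask(lambda: os.unlink(temp_zip.name)),
    )
```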
diff --git a/openhands/runtime/base.py b/openhands/runtime/base.py
index 983fc67fa898..8cdd17e18ead 100644
--- a/openhands/runtime/base.py
+++ b/openhands/runtime/base.py
@@ -254,6 +254,9 @@ async def _handle_action(self, event: Action) -> None:
# this might be unnecessary, since source should be set by the event stream when we're here
source = event.source if event.source else EventSource.AGENT
+ if isinstance(observation, NullObservation):
+ # don't add null observations to the event stream
+ return
self.event_stream.add_event(observation, source) # type: ignore[arg-type]
def clone_repo(
diff --git a/openhands/runtime/impl/action_execution/action_execution_client.py b/openhands/runtime/impl/action_execution/action_execution_client.py
index 258dcf3a85f7..683c16578dad 100644
--- a/openhands/runtime/impl/action_execution/action_execution_client.py
+++ b/openhands/runtime/impl/action_execution/action_execution_client.py
@@ -1,4 +1,5 @@
import os
+import shutil
import tempfile
import threading
from abc import abstractmethod
@@ -143,12 +144,10 @@ def copy_from(self, path: str) -> Path:
stream=True,
timeout=30,
) as response:
- with tempfile.NamedTemporaryFile(delete=False) as temp_file:
- total_length = 0
- for chunk in response.iter_content(chunk_size=8192):
- if chunk: # filter out keep-alive new chunks
- total_length += len(chunk)
- temp_file.write(chunk)
+ with tempfile.NamedTemporaryFile(
+ suffix='.zip', delete=False
+ ) as temp_file:
+ shutil.copyfileobj(response.raw, temp_file, length=16 * 1024)
return Path(temp_file.name)
except requests.Timeout:
raise TimeoutError('Copy operation timed out')
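A standalone sketch of the streamed download above (URL and timeout are illustrative): `shutil.copyfileobj` copies the raw response body into the temp file in fixed-size chunks instead of accumulating it manually.

```python
import shutil
import tempfile
from pathlib import Path

import requests


def download_zip(url: str) -> Path:
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_file:
            # Copy straight from the urllib3 file-like object, 16 KiB at a time.
            shutil.copyfileobj(response.raw, temp_file, length=16 * 1024)
    return Path(temp_file.name)
```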
diff --git a/openhands/runtime/impl/remote/remote_runtime.py b/openhands/runtime/impl/remote/remote_runtime.py
index 70e4217e4718..7e1256d28303 100644
--- a/openhands/runtime/impl/remote/remote_runtime.py
+++ b/openhands/runtime/impl/remote/remote_runtime.py
@@ -153,6 +153,12 @@ def _check_existing_runtime(self) -> bool:
return False
self.log('debug', f'Error while looking for remote runtime: {e}')
raise
+ except requests.exceptions.JSONDecodeError as e:
+ self.log(
+ 'error',
+ f'Invalid JSON response from runtime API: {e}. URL: {self.config.sandbox.remote_runtime_api_url}/sessions/{self.sid}. Response: {response}',
+ )
+ raise
if status == 'running':
return True
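A small sketch of the new error path with a hypothetical status-fetching helper; `requests.exceptions.JSONDecodeError` is what `response.json()` raises when the body is not valid JSON, and logging the URL and a slice of the body makes proxies or HTML error pages visible.

```python
import requests


def fetch_runtime_status(api_url: str, sid: str) -> str:
    # Hypothetical helper, not the real RemoteRuntime method.
    response = requests.get(f'{api_url}/sessions/{sid}', timeout=10)
    try:
        return response.json().get('status', 'unknown')
    except requests.exceptions.JSONDecodeError:
        print(f'Invalid JSON from {response.url}: {response.text[:200]!r}')
        raise
```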
diff --git a/openhands/server/routes/files.py b/openhands/server/routes/files.py
index 1581d3abaddb..eaa805174f9b 100644
--- a/openhands/server/routes/files.py
+++ b/openhands/server/routes/files.py
@@ -3,7 +3,6 @@
from fastapi import (
APIRouter,
- BackgroundTasks,
HTTPException,
Request,
UploadFile,
@@ -12,6 +11,7 @@
from fastapi.responses import FileResponse, JSONResponse
from pathspec import PathSpec
from pathspec.patterns import GitWildMatchPattern
+from starlette.background import BackgroundTask
from openhands.core.exceptions import AgentRuntimeUnavailableError
from openhands.core.logger import openhands_logger as logger
@@ -309,31 +309,25 @@ async def save_file(request: Request):
@app.get('/zip-directory')
-async def zip_current_workspace(
- request: Request, conversation_id: str, background_tasks: BackgroundTasks
-):
+def zip_current_workspace(request: Request, conversation_id: str):
try:
logger.debug('Zipping workspace')
runtime: Runtime = request.state.conversation.runtime
path = runtime.config.workspace_mount_path_in_sandbox
try:
- zip_file = await call_sync_from_async(runtime.copy_from, path)
+ zip_file_path = runtime.copy_from(path)
except AgentRuntimeUnavailableError as e:
logger.error(f'Error zipping workspace: {e}')
return JSONResponse(
status_code=500,
content={'error': f'Error zipping workspace: {e}'},
)
- response = FileResponse(
- path=zip_file,
+ return FileResponse(
+ path=zip_file_path,
filename='workspace.zip',
- media_type='application/x-zip-compressed',
+ media_type='application/zip',
+ background=BackgroundTask(lambda: os.unlink(zip_file_path)),
)
-
- # This will execute after the response is sent (So the file is not deleted before being sent)
- background_tasks.add_task(zip_file.unlink)
-
- return response
except Exception as e:
logger.error(f'Error zipping workspace: {e}')
raise HTTPException(
diff --git a/poetry.lock b/poetry.lock
index 83c962cb1178..1d994791e089 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -920,7 +920,7 @@ version = "3.4.0"
description = "Validate configuration and produce human readable error messages."
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
{file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
@@ -1634,7 +1634,7 @@ version = "0.3.9"
description = "Distribution utilities"
optional = false
python-versions = "*"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
{file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
@@ -3199,7 +3199,7 @@ version = "2.6.7"
description = "File identification library for Python"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "identify-2.6.7-py2.py3-none-any.whl", hash = "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0"},
{file = "identify-2.6.7.tar.gz", hash = "sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684"},
@@ -3303,7 +3303,7 @@ version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
-groups = ["main", "evaluation", "test"]
+groups = ["evaluation", "test"]
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
@@ -5503,7 +5503,7 @@ version = "1.9.1"
description = "Node.js virtual environment builder"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
{file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
@@ -5895,16 +5895,18 @@ realtime = ["websockets (>=13,<15)"]
[[package]]
name = "openhands-aci"
-version = "0.2.4"
+version = "0.2.5"
description = "An Agent-Computer Interface (ACI) designed for software development agents OpenHands."
optional = false
-python-versions = "^3.12"
+python-versions = "<4.0,>=3.12"
groups = ["main"]
-files = []
-develop = false
+files = [
+ {file = "openhands_aci-0.2.5-py3-none-any.whl", hash = "sha256:775a3ea9eacf090ff6fa6819dcc449a359a770f2d25232890441a799b0bd3c2e"},
+ {file = "openhands_aci-0.2.5.tar.gz", hash = "sha256:cfa51834771fb7f35cc754f04ee3b6d8d985df79a6fa4bdd0f57a8a20e9f0883"},
+]
[package.dependencies]
-binaryornot = "^0.4.4"
+binaryornot = ">=0.4.4,<0.5.0"
flake8 = "*"
gitpython = "*"
grep-ast = "0.3.3"
@@ -5912,21 +5914,13 @@ litellm = "*"
networkx = "*"
numpy = "*"
pandas = "*"
-pre-commit = "^4.1.0"
-pytest = "^8.3.4"
scipy = "*"
-tree-sitter = "^0.24.0"
-tree-sitter-javascript = "^0.23.1"
-tree-sitter-python = "^0.23.6"
-tree-sitter-ruby = "^0.23.1"
-tree-sitter-typescript = "^0.23.2"
-whatthepatch = "^1.0.6"
-
-[package.source]
-type = "git"
-url = "https://github.com/All-Hands-AI/openhands-aci.git"
-reference = "fix-memory-issue"
-resolved_reference = "d906bdd60b397fd8c30033a59d8f47f343d6ede4"
+tree-sitter = ">=0.24.0,<0.25.0"
+tree-sitter-javascript = ">=0.23.1,<0.24.0"
+tree-sitter-python = ">=0.23.6,<0.24.0"
+tree-sitter-ruby = ">=0.23.1,<0.24.0"
+tree-sitter-typescript = ">=0.23.2,<0.24.0"
+whatthepatch = ">=1.0.6,<2.0.0"
[[package]]
name = "opentelemetry-api"
@@ -6484,7 +6478,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
-groups = ["main", "evaluation", "test"]
+groups = ["evaluation", "test"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -6545,7 +6539,7 @@ version = "4.1.0"
description = "A framework for managing and maintaining multi-language pre-commit hooks."
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "pre_commit-4.1.0-py2.py3-none-any.whl", hash = "sha256:d29e7cb346295bcc1cc75fc3e92e343495e3ea0196c9ec6ba53f49f10ab6ae7b"},
{file = "pre_commit-4.1.0.tar.gz", hash = "sha256:ae3f018575a588e30dfddfab9a05448bfbd6b73d78709617b5a2b853549716d4"},
@@ -7357,7 +7351,7 @@ version = "8.3.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
-groups = ["main", "evaluation", "test"]
+groups = ["evaluation", "test"]
files = [
{file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"},
{file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"},
@@ -10016,7 +10010,7 @@ version = "20.29.2"
description = "Virtual Python Environment builder"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev", "evaluation"]
+groups = ["dev", "evaluation"]
files = [
{file = "virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a"},
{file = "virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728"},
@@ -10795,4 +10789,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.1"
python-versions = "^3.12"
-content-hash = "395c20a1f1bda924c1177369ce9c1ae27d12966da64b9fa60d91ca7895995c17"
+content-hash = "583a46735c1704428110581400333ce35cd7f6a3f22e5831beb7315ebeeb377a"
diff --git a/pyproject.toml b/pyproject.toml
index e1ba08237e5f..b11d2e8991b8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "openhands-ai"
-version = "0.24.0"
+version = "0.25.0"
description = "OpenHands: Code Less, Make More"
authors = ["OpenHands"]
license = "MIT"
@@ -67,7 +67,7 @@ runloop-api-client = "0.23.0"
libtmux = ">=0.37,<0.40"
pygithub = "^2.5.0"
joblib = "*"
-openhands-aci = "^0.2.4"
+openhands-aci = "^0.2.5"
python-socketio = "^5.11.4"
redis = "^5.2.0"
sse-starlette = "^2.1.3"
@@ -108,7 +108,6 @@ reportlab = "*"
[tool.coverage.run]
concurrency = ["gevent"]
-
[tool.poetry.group.runtime.dependencies]
jupyterlab = "*"
notebook = "*"
@@ -137,7 +136,6 @@ ignore = ["D1"]
[tool.ruff.lint.pydocstyle]
convention = "google"
-
[tool.poetry.group.evaluation.dependencies]
streamlit = "*"
whatthepatch = "*"
diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py
index 520d85d2aa7d..3931f2fdd713 100644
--- a/tests/unit/test_cli.py
+++ b/tests/unit/test_cli.py
@@ -1,7 +1,7 @@
from unittest.mock import patch
-from openhands.core.cli import read_input
from openhands.core.config import AppConfig
+from openhands.io import read_input
def test_single_line_input():
@@ -10,7 +10,7 @@ def test_single_line_input():
config.cli_multiline_input = False
with patch('builtins.input', return_value='hello world'):
- result = read_input(config)
+ result = read_input(config.cli_multiline_input)
assert result == 'hello world'
@@ -23,5 +23,5 @@ def test_multiline_input():
mock_inputs = ['line 1', 'line 2', 'line 3', '/exit']
with patch('builtins.input', side_effect=mock_inputs):
- result = read_input(config)
+ result = read_input(config.cli_multiline_input)
assert result == 'line 1\nline 2\nline 3'
diff --git a/tests/unit/test_condenser.py b/tests/unit/test_condenser.py
index e8c5af60afbb..fd1e922a103a 100644
--- a/tests/unit/test_condenser.py
+++ b/tests/unit/test_condenser.py
@@ -19,6 +19,7 @@
from openhands.events.observation.observation import Observation
from openhands.llm import LLM
from openhands.memory.condenser import Condenser
+from openhands.memory.condenser.condenser import RollingCondenser
from openhands.memory.condenser.impl import (
AmortizedForgettingCondenser,
ImportantEventSelection,
@@ -452,6 +453,49 @@ def test_llm_attention_condenser_invalid_config():
pytest.raises(ValueError, LLMAttentionCondenser.from_config, config)
+def test_rolling_condenser_handles_truncation(mock_state: State):
+ """Test that RollingCondenser correctly handles history truncation."""
+
+ class TestRollingCondenser(RollingCondenser):
+ """Test implementation of RollingCondenser that just returns all events."""
+
+ def condense(self, events: list[Event]) -> list[Event]:
+ return events
+
+ condenser = TestRollingCondenser()
+
+ # Initial history with 3 events
+ events = [
+ create_test_event('Event 1', id=1),
+ create_test_event('Event 2', id=2),
+ create_test_event('Event 3', id=3),
+ ]
+ mock_state.history = events
+
+ # First condensation - should return all events
+ results = condenser.condensed_history(mock_state)
+ assert len(results) == 3
+ assert [e._id for e in results] == [1, 2, 3]
+
+ # Simulate truncation - history is now shorter, and the condensation should
+ # just include the truncated history
+ mock_state.history = mock_state.history[-1:]
+
+ results = condenser.condensed_history(mock_state)
+ assert len(results) == 1
+ assert results[0]._id == 3
+
+ # Adding more events and condensing should "rebase" us from the truncated history
+ mock_state.history += [
+ create_test_event('Event 4', id=4),
+ create_test_event('Event 5', id=5),
+ ]
+
+ results = condenser.condensed_history(mock_state)
+ assert len(results) == 3
+ assert [e._id for e in results] == [3, 4, 5]
+
+
def test_llm_attention_condenser_keeps_first_events(mock_llm, mock_state):
"""Test that the LLMAttentionCondenser keeps the right number of initial events when forgetting."""
max_size = 4
diff --git a/tests/unit/test_json.py b/tests/unit/test_json.py
index 883efdfe4cfb..85ab265a536d 100644
--- a/tests/unit/test_json.py
+++ b/tests/unit/test_json.py
@@ -1,7 +1,7 @@
from datetime import datetime
-from openhands.core.utils import json
from openhands.events.action import MessageAction
+from openhands.io import json
def test_event_serialization_deserialization():
diff --git a/tests/unit/test_json_encoder.py b/tests/unit/test_json_encoder.py
index daa2708a6256..10058c8c2ba3 100644
--- a/tests/unit/test_json_encoder.py
+++ b/tests/unit/test_json_encoder.py
@@ -3,7 +3,7 @@
import psutil
-from openhands.core.utils.json import dumps
+from openhands.io.json import dumps
def get_memory_usage():
diff --git a/tests/unit/test_response_parsing.py b/tests/unit/test_response_parsing.py
index fd588d4c6edf..dc51dee3abe4 100644
--- a/tests/unit/test_response_parsing.py
+++ b/tests/unit/test_response_parsing.py
@@ -2,11 +2,11 @@
from openhands.agenthub.micro.agent import parse_response as parse_response_micro
from openhands.core.exceptions import LLMResponseError
-from openhands.core.utils.json import loads as custom_loads
from openhands.events.action import (
FileWriteAction,
MessageAction,
)
+from openhands.io import loads as custom_loads
@pytest.mark.parametrize(