diff --git a/.github/ISSUE_TEMPLATE/bug_template.yml b/.github/ISSUE_TEMPLATE/bug_template.yml index 7a6a0ba244f6..a5011583a658 100644 --- a/.github/ISSUE_TEMPLATE/bug_template.yml +++ b/.github/ISSUE_TEMPLATE/bug_template.yml @@ -30,6 +30,7 @@ body: description: How are you running OpenHands? options: - Docker command in README + - GitHub resolver - Development workflow - app.all-hands.dev - Other diff --git a/.github/workflows/ghcr-build.yml b/.github/workflows/ghcr-build.yml index 930cc61fae95..acdc89f0f495 100644 --- a/.github/workflows/ghcr-build.yml +++ b/.github/workflows/ghcr-build.yml @@ -56,7 +56,7 @@ jobs: docker-images: false swap-storage: true - name: Set up QEMU - uses: docker/setup-qemu-action@v3.2.0 + uses: docker/setup-qemu-action@v3.3.0 with: image: tonistiigi/binfmt:latest - name: Login to GHCR @@ -119,7 +119,7 @@ jobs: docker-images: false swap-storage: true - name: Set up QEMU - uses: docker/setup-qemu-action@v3.2.0 + uses: docker/setup-qemu-action@v3.3.0 with: image: tonistiigi/binfmt:latest - name: Login to GHCR diff --git a/.github/workflows/integration-runner.yml b/.github/workflows/integration-runner.yml index 120572aa0cdd..00d7c45957ef 100644 --- a/.github/workflows/integration-runner.yml +++ b/.github/workflows/integration-runner.yml @@ -56,6 +56,7 @@ jobs: LLM_MODEL: "litellm_proxy/claude-3-5-haiku-20241022" LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} + MAX_ITERATIONS: 10 run: | echo "[llm.eval]" > config.toml echo "model = \"$LLM_MODEL\"" >> config.toml @@ -70,7 +71,7 @@ jobs: env: SANDBOX_FORCE_REBUILD_RUNTIME: True run: | - poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' $N_PROCESSES '' 'haiku_run' + poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' 10 $N_PROCESSES '' 'haiku_run' # get integration tests report REPORT_FILE_HAIKU=$(find 
evaluation/evaluation_outputs/outputs/integration_tests/CodeActAgent/*haiku*_maxiter_10_N* -name "report.md" -type f | head -n 1) @@ -88,6 +89,7 @@ jobs: LLM_MODEL: "litellm_proxy/deepseek-chat" LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} + MAX_ITERATIONS: 10 run: | echo "[llm.eval]" > config.toml echo "model = \"$LLM_MODEL\"" >> config.toml @@ -99,7 +101,7 @@ jobs: env: SANDBOX_FORCE_REBUILD_RUNTIME: True run: | - poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' $N_PROCESSES '' 'deepseek_run' + poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' 10 $N_PROCESSES '' 'deepseek_run' # get integration tests report REPORT_FILE_DEEPSEEK=$(find evaluation/evaluation_outputs/outputs/integration_tests/CodeActAgent/deepseek*_maxiter_10_N* -name "report.md" -type f | head -n 1) @@ -109,11 +111,104 @@ jobs: echo >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV + # ------------------------------------------------------------- + # Run DelegatorAgent tests for Haiku, limited to t01 and t02 + - name: Wait a little bit (again) + run: sleep 5 + + - name: Configure config.toml for testing DelegatorAgent (Haiku) + env: + LLM_MODEL: "litellm_proxy/claude-3-5-haiku-20241022" + LLM_API_KEY: ${{ secrets.LLM_API_KEY }} + LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} + MAX_ITERATIONS: 30 + run: | + echo "[llm.eval]" > config.toml + echo "model = \"$LLM_MODEL\"" >> config.toml + echo "api_key = \"$LLM_API_KEY\"" >> config.toml + echo "base_url = \"$LLM_BASE_URL\"" >> config.toml + echo "temperature = 0.0" >> config.toml + + - name: Run integration test evaluation for DelegatorAgent (Haiku) + env: + SANDBOX_FORCE_REBUILD_RUNTIME: True + run: | + poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD DelegatorAgent '' 30 $N_PROCESSES "t01_fix_simple_typo,t02_add_bash_hello" 'delegator_haiku_run' + + # Find and export the delegator test results + 
REPORT_FILE_DELEGATOR_HAIKU=$(find evaluation/evaluation_outputs/outputs/integration_tests/DelegatorAgent/*haiku*_maxiter_30_N* -name "report.md" -type f | head -n 1) + echo "REPORT_FILE_DELEGATOR_HAIKU: $REPORT_FILE_DELEGATOR_HAIKU" + echo "INTEGRATION_TEST_REPORT_DELEGATOR_HAIKU<<EOF" >> $GITHUB_ENV + cat $REPORT_FILE_DELEGATOR_HAIKU >> $GITHUB_ENV + echo >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + # ------------------------------------------------------------- + # Run DelegatorAgent tests for DeepSeek, limited to t01 and t02 + - name: Wait a little bit (again) + run: sleep 5 + + - name: Configure config.toml for testing DelegatorAgent (DeepSeek) + env: + LLM_MODEL: "litellm_proxy/deepseek-chat" + LLM_API_KEY: ${{ secrets.LLM_API_KEY }} + LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} + MAX_ITERATIONS: 30 + run: | + echo "[llm.eval]" > config.toml + echo "model = \"$LLM_MODEL\"" >> config.toml + echo "api_key = \"$LLM_API_KEY\"" >> config.toml + echo "base_url = \"$LLM_BASE_URL\"" >> config.toml + echo "temperature = 0.0" >> config.toml + - name: Run integration test evaluation for DelegatorAgent (DeepSeek) + env: + SANDBOX_FORCE_REBUILD_RUNTIME: True + run: | + poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD DelegatorAgent '' 30 $N_PROCESSES "t01_fix_simple_typo,t02_add_bash_hello" 'delegator_deepseek_run' + + # Find and export the delegator test results + REPORT_FILE_DELEGATOR_DEEPSEEK=$(find evaluation/evaluation_outputs/outputs/integration_tests/DelegatorAgent/deepseek*_maxiter_30_N* -name "report.md" -type f | head -n 1) + echo "REPORT_FILE_DELEGATOR_DEEPSEEK: $REPORT_FILE_DELEGATOR_DEEPSEEK" + echo "INTEGRATION_TEST_REPORT_DELEGATOR_DEEPSEEK<<EOF" >> $GITHUB_ENV + cat $REPORT_FILE_DELEGATOR_DEEPSEEK >> $GITHUB_ENV + echo >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + # ------------------------------------------------------------- + # Run VisualBrowsingAgent tests for DeepSeek, limited to t05 and t06 + - name: Wait a little bit (again) + run: 
sleep 5 + + - name: Configure config.toml for testing VisualBrowsingAgent (DeepSeek) + env: + LLM_MODEL: "litellm_proxy/deepseek-chat" + LLM_API_KEY: ${{ secrets.LLM_API_KEY }} + LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} + MAX_ITERATIONS: 15 + run: | + echo "[llm.eval]" > config.toml + echo "model = \"$LLM_MODEL\"" >> config.toml + echo "api_key = \"$LLM_API_KEY\"" >> config.toml + echo "base_url = \"$LLM_BASE_URL\"" >> config.toml + echo "temperature = 0.0" >> config.toml + - name: Run integration test evaluation for VisualBrowsingAgent (DeepSeek) + env: + SANDBOX_FORCE_REBUILD_RUNTIME: True + run: | + poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD VisualBrowsingAgent '' 15 $N_PROCESSES "t05_simple_browsing,t06_github_pr_browsing.py" 'visualbrowsing_deepseek_run' + + # Find and export the visual browsing agent test results + REPORT_FILE_VISUALBROWSING_DEEPSEEK=$(find evaluation/evaluation_outputs/outputs/integration_tests/VisualBrowsingAgent/deepseek*_maxiter_15_N* -name "report.md" -type f | head -n 1) + echo "REPORT_FILE_VISUALBROWSING_DEEPSEEK: $REPORT_FILE_VISUALBROWSING_DEEPSEEK" + echo "INTEGRATION_TEST_REPORT_VISUALBROWSING_DEEPSEEK<<EOF" >> $GITHUB_ENV + cat $REPORT_FILE_VISUALBROWSING_DEEPSEEK >> $GITHUB_ENV + echo >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + - name: Create archive of evaluation outputs run: | TIMESTAMP=$(date +'%y-%m-%d-%H-%M') cd evaluation/evaluation_outputs/outputs # Change to the outputs directory - tar -czvf ../../../integration_tests_${TIMESTAMP}.tar.gz integration_tests/CodeActAgent/* # Only include the actual result directories + tar -czvf ../../../integration_tests_${TIMESTAMP}.tar.gz integration_tests/CodeActAgent/* integration_tests/DelegatorAgent/* integration_tests/VisualBrowsingAgent/* # Only include the actual result directories - name: Upload evaluation results as artifact uses: actions/upload-artifact@v4 @@ -154,5 +249,14 @@ jobs: **Integration Tests Report (DeepSeek)** DeepSeek LLM Test Results: 
${{ env.INTEGRATION_TEST_REPORT_DEEPSEEK }} + --- + **Integration Tests Report Delegator (Haiku)** + ${{ env.INTEGRATION_TEST_REPORT_DELEGATOR_HAIKU }} + --- + **Integration Tests Report Delegator (DeepSeek)** + ${{ env.INTEGRATION_TEST_REPORT_DELEGATOR_DEEPSEEK }} + --- + **Integration Tests Report VisualBrowsing (DeepSeek)** + ${{ env.INTEGRATION_TEST_REPORT_VISUALBROWSING_DEEPSEEK }} --- Download testing outputs (includes both Haiku and DeepSeek results): [Download](${{ steps.upload_results_artifact.outputs.artifact-url }}) diff --git a/.github/workflows/openhands-resolver.yml b/.github/workflows/openhands-resolver.yml index 028316ee05d5..f0fed3ac70ad 100644 --- a/.github/workflows/openhands-resolver.yml +++ b/.github/workflows/openhands-resolver.yml @@ -84,6 +84,10 @@ jobs: run: | python -m pip index versions openhands-ai > openhands_versions.txt OPENHANDS_VERSION=$(head -n 1 openhands_versions.txt | awk '{print $2}' | tr -d '()') + # Ensure requirements.txt ends with newline before appending + if [ -f requirements.txt ] && [ -s requirements.txt ]; then + sed -i -e '$a\' requirements.txt + fi echo "openhands-ai==${OPENHANDS_VERSION}" >> requirements.txt cat requirements.txt @@ -184,6 +188,7 @@ jobs: }); - name: Install OpenHands + id: install_openhands uses: actions/github-script@v7 env: COMMENT_BODY: ${{ github.event.comment.body || '' }} @@ -196,7 +201,6 @@ jobs: const reviewBody = process.env.REVIEW_BODY.trim(); const labelName = process.env.LABEL_NAME.trim(); const eventName = process.env.EVENT_NAME.trim(); - // Check conditions const isExperimentalLabel = labelName === "fix-me-experimental"; const isIssueCommentExperimental = @@ -205,6 +209,9 @@ jobs: const isReviewCommentExperimental = eventName === "pull_request_review" && reviewBody.includes("@openhands-agent-exp"); + // Set output variable + core.setOutput('isExperimental', isExperimentalLabel || isIssueCommentExperimental || isReviewCommentExperimental); + // Perform package installation if 
(isExperimentalLabel || isIssueCommentExperimental || isReviewCommentExperimental) { console.log("Installing experimental OpenHands..."); @@ -230,7 +237,8 @@ jobs: --issue-number ${{ env.ISSUE_NUMBER }} \ --issue-type ${{ env.ISSUE_TYPE }} \ --max-iterations ${{ env.MAX_ITERATIONS }} \ - --comment-id ${{ env.COMMENT_ID }} + --comment-id ${{ env.COMMENT_ID }} \ + --is-experimental ${{ steps.install_openhands.outputs.isExperimental }} - name: Check resolution result id: check_result diff --git a/.github/workflows/py-unit-tests-mac.yml b/.github/workflows/py-unit-tests-mac.yml deleted file mode 100644 index 4d9e8bf5cbd4..000000000000 --- a/.github/workflows/py-unit-tests-mac.yml +++ /dev/null @@ -1,98 +0,0 @@ -# Workflow that runs python unit tests on mac -name: Run Python Unit Tests Mac - -# This job is flaky so only run it nightly -on: - schedule: - - cron: '0 0 * * *' - -jobs: - # Run python unit tests on macOS - test-on-macos: - name: Python Unit Tests on macOS - runs-on: macos-14 - env: - INSTALL_DOCKER: '1' # Set to '0' to skip Docker installation - strategy: - matrix: - python-version: ['3.12'] - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Cache Poetry dependencies - uses: actions/cache@v4 - with: - path: | - ~/.cache/pypoetry - ~/.virtualenvs - key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} - restore-keys: | - ${{ runner.os }}-poetry- - - name: Install tmux - run: brew install tmux - - name: Install poetry via pipx - run: pipx install poetry - - name: Install Python dependencies using Poetry - run: poetry install --without evaluation,llama-index - - name: Install & Start Docker - if: env.INSTALL_DOCKER == '1' - run: | - INSTANCE_NAME="colima-${GITHUB_RUN_ID}" - - # Uninstall colima to upgrade to the latest version - if brew list colima &>/dev/null; then - brew uninstall colima - # unlinking colima 
dependency: go - brew uninstall go@1.21 - fi - rm -rf ~/.colima ~/.lima - brew install --HEAD colima - brew install docker - - start_colima() { - # Find a free port in the range 10000-20000 - RANDOM_PORT=$((RANDOM % 10001 + 10000)) - - # Original line: - if ! colima start --network-address --arch x86_64 --cpu=1 --memory=1 --verbose --ssh-port $RANDOM_PORT; then - echo "Failed to start Colima." - return 1 - fi - return 0 - } - - # Attempt to start Colima for 5 total attempts: - ATTEMPT_LIMIT=5 - for ((i=1; i<=ATTEMPT_LIMIT; i++)); do - - if start_colima; then - echo "Colima started successfully." - break - else - colima stop -f - sleep 10 - colima delete -f - if [ $i -eq $ATTEMPT_LIMIT ]; then - exit 1 - fi - sleep 10 - fi - done - - # For testcontainers to find the Colima socket - # https://github.com/abiosoft/colima/blob/main/docs/FAQ.md#cannot-connect-to-the-docker-daemon-at-unixvarrundockersock-is-the-docker-daemon-running - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - name: Build Environment - run: make build - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v3 - - name: Run Tests - run: poetry run pytest --forked --cov=openhands --cov-report=xml ./tests/unit --ignore=tests/unit/test_memory.py - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 4cccbce15e27..0d9e879efd91 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,3 +19,4 @@ jobs: close-issue-message: 'This issue was closed because it has been stalled for over 30 days with no activity.' close-pr-message: 'This PR was closed because it has been stalled for over 30 days with no activity.' 
days-before-close: 7 + operations-per-run: 150 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 502058015a09..e033bde194d2 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -113,6 +113,20 @@ individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. +### Slack and Discord Etiquettes + +These Slack and Discord etiquette guidelines are designed to foster an inclusive, respectful, and productive environment for all community members. By following these best practices, we ensure effective communication and collaboration while minimizing disruptions. Let’s work together to build a supportive and welcoming community! + +- Communicate respectfully and professionally, avoiding sarcasm or harsh language, and remember that tone can be difficult to interpret in text. +- Use threads for specific discussions to keep channels organized and easier to follow. +- Tag others only when their input is critical or urgent, and use @here, @channel or @everyone sparingly to minimize disruptions. +- Be patient, as open-source contributors and maintainers often have other commitments and may need time to respond. +- Post questions or discussions in the most relevant channel (e.g., for [slack - #general](https://app.slack.com/client/T06P212QSEA/C06P5NCGSFP) for general topics, [slack - #questions](https://openhands-ai.slack.com/archives/C06U8UTKSAD) for queries/questions, [discord - #general](https://discord.com/channels/1222935860639563850/1222935861386018885)). +- When asking for help or raising issues, include necessary details like links, screenshots, or clear explanations to provide context. +- Keep discussions in public channels whenever possible to allow others to benefit from the conversation, unless the matter is sensitive or private. 
+- Always adhere to [our standards](https://github.com/All-Hands-AI/OpenHands/blob/main/CODE_OF_CONDUCT.md#our-standards) to ensure a welcoming and collaborative environment. +- If you choose to mute a channel, consider setting up alerts for topics that still interest you to stay engaged. For Slack, Go to Settings → Notifications → My Keywords to add specific keywords that will notify you when mentioned. For example, if you're here for discussions about LLMs, mute the channel if it’s too busy, but set notifications to alert you only when “LLMs” appears in messages. Also for Discord, go to the channel notifications and choose the option that best describes your need. + ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], diff --git a/Development.md b/Development.md index 3ac856c2cea6..354161b6b61c 100644 --- a/Development.md +++ b/Development.md @@ -5,7 +5,7 @@ Otherwise, you can clone the OpenHands project directly. ## Start the Server for Development ### 1. Requirements -* Linux, Mac OS, or [WSL on Windows](https://learn.microsoft.com/en-us/windows/wsl/install) [Ubuntu <= 22.04] +* Linux, Mac OS, or [WSL on Windows](https://learn.microsoft.com/en-us/windows/wsl/install) [Ubuntu >= 22.04] * [Docker](https://docs.docker.com/engine/install/) (For those on MacOS, make sure to allow the default Docker socket to be used from advanced settings!) * [Python](https://www.python.org/downloads/) = 3.12 * [NodeJS](https://nodejs.org/en/download/package-manager) >= 20.x @@ -100,7 +100,7 @@ poetry run pytest ./tests/unit/test_*.py To reduce build time (e.g., if no changes were made to the client-runtime component), you can use an existing Docker container image by setting the SANDBOX_RUNTIME_CONTAINER_IMAGE environment variable to the desired Docker image. 
-Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.19-nikolaik` +Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.22-nikolaik` ## Develop inside Docker container diff --git a/README.md b/README.md index b4f8384be94e..b0e8b9bd5fd9 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ CodeCov MIT License
- Join our Slack community + Join our Slack community Join our Discord community Credits
@@ -39,21 +39,21 @@ Learn more at [docs.all-hands.dev](https://docs.all-hands.dev), or jump to the [ ## ⚡ Quick Start The easiest way to run OpenHands is in Docker. -See the [Installation](https://docs.all-hands.dev/modules/usage/installation) guide for +See the [Running OpenHands](https://docs.all-hands.dev/modules/usage/installation) guide for system requirements and more information. ```bash -docker pull docker.all-hands.dev/all-hands-ai/runtime:0.19-nikolaik +docker pull docker.all-hands.dev/all-hands-ai/runtime:0.22-nikolaik docker run -it --rm --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.19-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.22-nikolaik \ -e LOG_ALL_EVENTS=true \ -v /var/run/docker.sock:/var/run/docker.sock \ -v ~/.openhands-state:/.openhands-state \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ --name openhands-app \ - docker.all-hands.dev/all-hands-ai/openhands:0.19 + docker.all-hands.dev/all-hands-ai/openhands:0.22 ``` You'll find OpenHands running at [http://localhost:3000](http://localhost:3000)! @@ -69,7 +69,7 @@ run OpenHands in a scriptable [headless mode](https://docs.all-hands.dev/modules interact with it via a [friendly CLI](https://docs.all-hands.dev/modules/usage/how-to/cli-mode), or run it on tagged issues with [a github action](https://docs.all-hands.dev/modules/usage/how-to/github-action). -Visit [Installation](https://docs.all-hands.dev/modules/usage/installation) for more information and setup instructions. +Visit [Running OpenHands](https://docs.all-hands.dev/modules/usage/installation) for more information and setup instructions. > [!CAUTION] > OpenHands is meant to be run by a single user on their local workstation. @@ -96,7 +96,7 @@ troubleshooting resources, and advanced configuration options. OpenHands is a community-driven project, and we welcome contributions from everyone. 
We do most of our communication through Slack, so this is the best place to start, but we also are happy to have you contact us on Discord or Github: -- [Join our Slack workspace](https://join.slack.com/t/openhands-ai/shared_invite/zt-2wkh4pklz-w~h_DVDtEe9H5kyQlcNxVw) - Here we talk about research, architecture, and future development. +- [Join our Slack workspace](https://join.slack.com/t/openhands-ai/shared_invite/zt-2ypg5jweb-d~6hObZDbXi_HEL8PDrbHg) - Here we talk about research, architecture, and future development. - [Join our Discord server](https://discord.gg/ESHStjSjD4) - This is a community-run server for general discussion, questions, and feedback. - [Read or post Github Issues](https://github.com/All-Hands-AI/OpenHands/issues) - Check out the issues we're working on, or add your own ideas. diff --git a/config.template.toml b/config.template.toml index fa271e0ebe1f..4da00e5df010 100644 --- a/config.template.toml +++ b/config.template.toml @@ -23,6 +23,9 @@ workspace_base = "./workspace" # Cache directory path #cache_dir = "/tmp/cache" +# Reasoning effort for o1 models (low, medium, high, or not set) +#reasoning_effort = "medium" + # Debugging enabled #debug = false @@ -34,7 +37,12 @@ workspace_base = "./workspace" # Path to store trajectories, can be a folder or a file # If it's a folder, the session id will be used as the file name -#trajectories_path="./trajectories" +#save_trajectory_path="./trajectories" + +# Path to replay a trajectory, must be a file path +# If provided, trajectory will be loaded and replayed before the +# agent responds to any user instruction +#replay_trajectory_path = "" # File store path #file_store_path = "/tmp/file_store" @@ -96,7 +104,7 @@ workspace_base = "./workspace" #aws_secret_access_key = "" # API key to use (For Headless / CLI only - In Web this is overridden by Session Init) -api_key = "your-api-key" +api_key = "" # API base URL (For Headless / CLI only - In Web this is overridden by Session Init) #base_url = "" @@ 
-187,7 +195,7 @@ model = "gpt-4o" #native_tool_calling = None [llm.gpt4o-mini] -api_key = "your-api-key" +api_key = "" model = "gpt-4o" @@ -220,8 +228,8 @@ codeact_enable_jupyter = true # LLM config group to use #llm_config = 'your-llm-config-group' -# Whether to use microagents at all -#use_microagents = true +# Whether to use prompt extension (e.g., microagent, repo/runtime info) at all +#enable_prompt_extensions = true # List of microagents to disable #disabled_microagents = [] diff --git a/containers/dev/compose.yml b/containers/dev/compose.yml index 582774206e44..c744ba495c5b 100644 --- a/containers/dev/compose.yml +++ b/containers/dev/compose.yml @@ -11,7 +11,7 @@ services: - BACKEND_HOST=${BACKEND_HOST:-"0.0.0.0"} - SANDBOX_API_HOSTNAME=host.docker.internal # - - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.19-nikolaik} + - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.22-nikolaik} - SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} - WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace} ports: diff --git a/compose.yml b/docker-compose.yml similarity index 65% rename from compose.yml rename to docker-compose.yml index dc36f0d43bce..300e2b5c3fe5 100644 --- a/compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -# + services: openhands: build: @@ -7,8 +7,8 @@ services: image: openhands:latest container_name: openhands-app-${DATE:-} environment: - - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.19-nikolaik} - - SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} + - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.22-nikolaik} + #- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of openhands-state for this user - 
WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace} ports: - "3000:3000" @@ -16,6 +16,7 @@ services: - "host.docker.internal:host-gateway" volumes: - /var/run/docker.sock:/var/run/docker.sock + - ~/.openhands-state:/.openhands-state - ${WORKSPACE_BASE:-$PWD/workspace}:/opt/workspace_base pull_policy: build stdin_open: true diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/configuration-options.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/configuration-options.md index 9ae2302ff364..7115c85b1e1f 100644 --- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/configuration-options.md +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/configuration-options.md @@ -1,5 +1,3 @@ - - # Options de configuration Ce guide détaille toutes les options de configuration disponibles pour OpenHands, vous aidant à personnaliser son comportement et à l'intégrer avec d'autres services. @@ -94,7 +92,7 @@ Les options de configuration de base sont définies dans la section `[core]` du - Description : Désactiver la couleur dans la sortie du terminal **Trajectoires** -- `trajectories_path` +- `save_trajectory_path` - Type : `str` - Valeur par défaut : `"./trajectories"` - Description : Chemin pour stocker les trajectoires (peut être un dossier ou un fichier). Si c'est un dossier, les trajectoires seront enregistrées dans un fichier nommé avec l'ID de session et l'extension .json, dans ce dossier. @@ -184,6 +182,10 @@ Les options de configuration LLM (Large Language Model) sont définies dans la s Pour les utiliser avec la commande docker, passez `-e LLM_