From ba599c7dd6dd8b0e99af8cec8d1883b480f2f811 Mon Sep 17 00:00:00 2001 From: "sp.wack" <83104063+amanape@users.noreply.github.com> Date: Wed, 12 Feb 2025 22:46:15 +0400 Subject: [PATCH 01/44] chore: Throw a 404 instead of returning defaults if settings does not exist (#6704) --- .../components/shared/modals/settings/settings-modal.tsx | 5 +++-- frontend/src/hooks/query/use-settings.ts | 8 ++++---- openhands/storage/settings/file_settings_store.py | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/frontend/src/components/shared/modals/settings/settings-modal.tsx b/frontend/src/components/shared/modals/settings/settings-modal.tsx index badd7babffe1..5caa97f325c4 100644 --- a/frontend/src/components/shared/modals/settings/settings-modal.tsx +++ b/frontend/src/components/shared/modals/settings/settings-modal.tsx @@ -5,9 +5,10 @@ import { LoadingSpinner } from "../../loading-spinner"; import { ModalBackdrop } from "../modal-backdrop"; import { SettingsForm } from "./settings-form"; import { Settings } from "#/types/settings"; +import { DEFAULT_SETTINGS } from "#/services/settings"; interface SettingsModalProps { - settings: Settings; + settings?: Settings; onClose: () => void; } @@ -38,7 +39,7 @@ export function SettingsModal({ onClose, settings }: SettingsModalProps) { )} {aiConfigOptions.data && ( { const query = useQuery({ queryKey: ["settings"], queryFn: getSettingsQueryFn, - initialData: DEFAULT_SETTINGS, - staleTime: 0, - retry: false, enabled: config?.APP_MODE !== "saas" || githubTokenIsSet, + // Only retry if the error is not a 404 because we + // would want to show the modal immediately if the + // settings are not found + retry: (_, error) => error.status !== 404, meta: { disableToast: true, }, diff --git a/openhands/storage/settings/file_settings_store.py b/openhands/storage/settings/file_settings_store.py index eaf35554d7ae..d3cc08677078 100644 --- a/openhands/storage/settings/file_settings_store.py +++ b/openhands/storage/settings/file_settings_store.py @@ -23,7 +23,7 @@ async def load(self) -> Settings | None: settings = Settings(**kwargs) return settings except FileNotFoundError: - return Settings.from_config() + return None async def store(self, settings: Settings): json_str = settings.model_dump_json(context={'expose_secrets': True}) From 312b9fbfb1c284c87ec57e0998cf3a731f795b70 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Wed, 12 Feb 2025 15:39:10 -0500 Subject: [PATCH 02/44] Feat: Add selected branch param to backend (#6508) --- frontend/src/api/open-hands.ts | 1 + openhands/runtime/base.py | 25 ++++++++++++++++--- .../server/routes/manage_conversations.py | 5 ++++ openhands/server/session/agent_session.py | 8 +++++- .../server/session/conversation_init_data.py | 1 + openhands/server/session/session.py | 3 +++ 6 files changed, 38 insertions(+), 5 deletions(-) diff --git a/frontend/src/api/open-hands.ts b/frontend/src/api/open-hands.ts index 92da6c4d2956..0f8771a161b3 100644 --- a/frontend/src/api/open-hands.ts +++ b/frontend/src/api/open-hands.ts @@ -229,6 +229,7 @@ class OpenHands { ): Promise { const body = { selected_repository: selectedRepository, + selected_branch: undefined, initial_user_msg: initialUserMsg, image_urls: imageUrls, }; diff --git a/openhands/runtime/base.py b/openhands/runtime/base.py index b7c93eea6df2..4f1e37f471dc 100644 --- a/openhands/runtime/base.py +++ b/openhands/runtime/base.py @@ -249,20 +249,37 @@ async def _handle_action(self, event: Action) -> None: source = event.source if event.source else EventSource.AGENT 
self.event_stream.add_event(observation, source) # type: ignore[arg-type] - def clone_repo(self, github_token: SecretStr, selected_repository: str) -> str: + def clone_repo( + self, + github_token: SecretStr, + selected_repository: str, + selected_branch: str | None, + ) -> str: if not github_token or not selected_repository: raise ValueError( 'github_token and selected_repository must be provided to clone a repository' ) url = f'https://{github_token.get_secret_value()}@github.com/{selected_repository}.git' dir_name = selected_repository.split('/')[1] - # add random branch name to avoid conflicts + + # Generate a random branch name to avoid conflicts random_str = ''.join( random.choices(string.ascii_lowercase + string.digits, k=8) ) - branch_name = f'openhands-workspace-{random_str}' + openhands_workspace_branch = f'openhands-workspace-{random_str}' + + # Clone repository command + clone_command = f'git clone {url} {dir_name}' + + # Checkout to appropriate branch + checkout_command = ( + f'git checkout {selected_branch}' + if selected_branch + else f'git checkout -b {openhands_workspace_branch}' + ) + action = CmdRunAction( - command=f'git clone {url} {dir_name} ; cd {dir_name} ; git checkout -b {branch_name}', + command=f'{clone_command} ; cd {dir_name} ; {checkout_command}', ) self.log('info', f'Cloning repo: {selected_repository}') self.run_action(action) diff --git a/openhands/server/routes/manage_conversations.py b/openhands/server/routes/manage_conversations.py index 4edfb47c5177..29db83007656 100644 --- a/openhands/server/routes/manage_conversations.py +++ b/openhands/server/routes/manage_conversations.py @@ -38,6 +38,7 @@ class InitSessionRequest(BaseModel): selected_repository: str | None = None + selected_branch: str | None = None initial_user_msg: str | None = None image_urls: list[str] | None = None @@ -46,6 +47,7 @@ async def _create_new_conversation( user_id: str | None, token: SecretStr | None, selected_repository: str | None, + selected_branch: str | None, initial_user_msg: str | None, image_urls: list[str] | None, ): @@ -74,6 +76,7 @@ async def _create_new_conversation( session_init_args['github_token'] = token or SecretStr('') session_init_args['selected_repository'] = selected_repository + session_init_args['selected_branch'] = selected_branch conversation_init_data = ConversationInitData(**session_init_args) logger.info('Loading conversation store') conversation_store = await ConversationStoreImpl.get_instance(config, user_id) @@ -135,6 +138,7 @@ async def new_conversation(request: Request, data: InitSessionRequest): github_token = await gh_client.get_latest_token() selected_repository = data.selected_repository + selected_branch = data.selected_branch initial_user_msg = data.initial_user_msg image_urls = data.image_urls or [] @@ -144,6 +148,7 @@ async def new_conversation(request: Request, data: InitSessionRequest): user_id, github_token, selected_repository, + selected_branch, initial_user_msg, image_urls, ) diff --git a/openhands/server/session/agent_session.py b/openhands/server/session/agent_session.py index 298474b884a2..31a31bd151fe 100644 --- a/openhands/server/session/agent_session.py +++ b/openhands/server/session/agent_session.py @@ -76,6 +76,7 @@ async def start( agent_configs: dict[str, AgentConfig] | None = None, github_token: SecretStr | None = None, selected_repository: str | None = None, + selected_branch: str | None = None, initial_message: MessageAction | None = None, ): """Starts the Agent session @@ -105,6 +106,7 @@ async def start( agent=agent, 
github_token=github_token, selected_repository=selected_repository, + selected_branch=selected_branch, ) self.controller = self._create_controller( @@ -184,6 +186,7 @@ async def _create_runtime( agent: Agent, github_token: SecretStr | None = None, selected_repository: str | None = None, + selected_branch: str | None = None, ): """Creates a runtime instance @@ -239,7 +242,10 @@ async def _create_runtime( repo_directory = None if selected_repository: repo_directory = await call_sync_from_async( - self.runtime.clone_repo, github_token, selected_repository + self.runtime.clone_repo, + github_token, + selected_repository, + selected_branch, ) if agent.prompt_manager: diff --git a/openhands/server/session/conversation_init_data.py b/openhands/server/session/conversation_init_data.py index 8773f48b326a..4cb6acd50f22 100644 --- a/openhands/server/session/conversation_init_data.py +++ b/openhands/server/session/conversation_init_data.py @@ -10,3 +10,4 @@ class ConversationInitData(Settings): github_token: SecretStr | None = Field(default=None) selected_repository: str | None = Field(default=None) + selected_branch: str | None = Field(default=None) diff --git a/openhands/server/session/session.py b/openhands/server/session/session.py index 5d34baf4f6e5..d7807fc94740 100644 --- a/openhands/server/session/session.py +++ b/openhands/server/session/session.py @@ -123,9 +123,11 @@ async def initialize_agent( github_token = None selected_repository = None + selected_branch = None if isinstance(settings, ConversationInitData): github_token = settings.github_token selected_repository = settings.selected_repository + selected_branch = settings.selected_branch try: await self.agent_session.start( @@ -138,6 +140,7 @@ async def initialize_agent( agent_configs=self.config.get_agent_configs(), github_token=github_token, selected_repository=selected_repository, + selected_branch=selected_branch, initial_message=initial_message, ) except Exception as e: From cb5e7f0130af4dd1f701bcd693f7142711f9a5d2 Mon Sep 17 00:00:00 2001 From: tofarr Date: Thu, 13 Feb 2025 12:24:44 +0000 Subject: [PATCH 03/44] Agent session no longer stuck in starting on raised exception (#6703) --- .../runtime/impl/remote/remote_runtime.py | 5 +- openhands/server/session/agent_session.py | 69 ++++++++++--------- 2 files changed, 38 insertions(+), 36 deletions(-) diff --git a/openhands/runtime/impl/remote/remote_runtime.py b/openhands/runtime/impl/remote/remote_runtime.py index 068461cc61fb..56b0ec28caa8 100644 --- a/openhands/runtime/impl/remote/remote_runtime.py +++ b/openhands/runtime/impl/remote/remote_runtime.py @@ -92,8 +92,9 @@ def _get_action_execution_server_host(self): async def connect(self): try: await call_sync_from_async(self._start_or_attach_to_runtime) - except AgentRuntimeNotReadyError: - self.log('error', 'Runtime failed to start, timed out before ready') + except Exception: + self.close() + self.log('error', 'Runtime failed to start') raise await call_sync_from_async(self.setup_initial_env) self._runtime_initialized = True diff --git a/openhands/server/session/agent_session.py b/openhands/server/session/agent_session.py index 31a31bd151fe..79b98733850a 100644 --- a/openhands/server/session/agent_session.py +++ b/openhands/server/session/agent_session.py @@ -99,42 +99,43 @@ async def start( return self._starting = True self._started_at = time.time() - self._create_security_analyzer(config.security.security_analyzer) - await self._create_runtime( - runtime_name=runtime_name, - config=config, - agent=agent, - 
github_token=github_token, - selected_repository=selected_repository, - selected_branch=selected_branch, - ) - - self.controller = self._create_controller( - agent, - config.security.confirmation_mode, - max_iterations, - max_budget_per_task=max_budget_per_task, - agent_to_llm_config=agent_to_llm_config, - agent_configs=agent_configs, - ) - if github_token: - self.event_stream.set_secrets( - { - 'github_token': github_token.get_secret_value(), - } - ) - if initial_message: - self.event_stream.add_event(initial_message, EventSource.USER) - self.event_stream.add_event( - ChangeAgentStateAction(AgentState.RUNNING), EventSource.ENVIRONMENT - ) - else: - self.event_stream.add_event( - ChangeAgentStateAction(AgentState.AWAITING_USER_INPUT), - EventSource.ENVIRONMENT, + try: + self._create_security_analyzer(config.security.security_analyzer) + await self._create_runtime( + runtime_name=runtime_name, + config=config, + agent=agent, + github_token=github_token, + selected_repository=selected_repository, + selected_branch=selected_branch, ) - self._starting = False + self.controller = self._create_controller( + agent, + config.security.confirmation_mode, + max_iterations, + max_budget_per_task=max_budget_per_task, + agent_to_llm_config=agent_to_llm_config, + agent_configs=agent_configs, + ) + if github_token: + self.event_stream.set_secrets( + { + 'github_token': github_token.get_secret_value(), + } + ) + if initial_message: + self.event_stream.add_event(initial_message, EventSource.USER) + self.event_stream.add_event( + ChangeAgentStateAction(AgentState.RUNNING), EventSource.ENVIRONMENT + ) + else: + self.event_stream.add_event( + ChangeAgentStateAction(AgentState.AWAITING_USER_INPUT), + EventSource.ENVIRONMENT, + ) + finally: + self._starting = False async def close(self): """Closes the Agent session""" From 653168fc3d234fc56908159f97577ad33aa1bc81 Mon Sep 17 00:00:00 2001 From: mamoodi Date: Thu, 13 Feb 2025 10:22:05 -0500 Subject: [PATCH 04/44] Release 0.24.0 (#6689) --- Development.md | 2 +- README.md | 6 +++--- containers/dev/compose.yml | 2 +- docker-compose.yml | 2 +- .../current/usage/how-to/cli-mode.md | 4 ++-- .../current/usage/how-to/headless-mode.md | 4 ++-- .../current/usage/installation.mdx | 6 +++--- .../current/usage/runtimes.md | 2 +- .../current/usage/how-to/cli-mode.md | 4 ++-- .../current/usage/how-to/headless-mode.md | 4 ++-- .../current/usage/installation.mdx | 6 +++--- .../current/usage/runtimes.md | 2 +- docs/modules/usage/how-to/cli-mode.md | 4 ++-- docs/modules/usage/how-to/headless-mode.md | 4 ++-- docs/modules/usage/installation.mdx | 6 +++--- docs/modules/usage/runtimes.md | 2 +- frontend/package-lock.json | 4 ++-- frontend/package.json | 2 +- pyproject.toml | 4 +--- 19 files changed, 34 insertions(+), 36 deletions(-) diff --git a/Development.md b/Development.md index 996d88807b74..8d6e35751879 100644 --- a/Development.md +++ b/Development.md @@ -100,7 +100,7 @@ poetry run pytest ./tests/unit/test_*.py To reduce build time (e.g., if no changes were made to the client-runtime component), you can use an existing Docker container image by setting the SANDBOX_RUNTIME_CONTAINER_IMAGE environment variable to the desired Docker image. 
-Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.23-nikolaik` +Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.24-nikolaik` ## Develop inside Docker container diff --git a/README.md b/README.md index a034a64faef7..22caad34c99f 100644 --- a/README.md +++ b/README.md @@ -43,17 +43,17 @@ See the [Running OpenHands](https://docs.all-hands.dev/modules/usage/installatio system requirements and more information. ```bash -docker pull docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik +docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik docker run -it --rm --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e LOG_ALL_EVENTS=true \ -v /var/run/docker.sock:/var/run/docker.sock \ -v ~/.openhands-state:/.openhands-state \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ --name openhands-app \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 + docker.all-hands.dev/all-hands-ai/openhands:0.24 ``` You'll find OpenHands running at [http://localhost:3000](http://localhost:3000)! diff --git a/containers/dev/compose.yml b/containers/dev/compose.yml index 500129f14d52..50c8ed04563f 100644 --- a/containers/dev/compose.yml +++ b/containers/dev/compose.yml @@ -11,7 +11,7 @@ services: - BACKEND_HOST=${BACKEND_HOST:-"0.0.0.0"} - SANDBOX_API_HOSTNAME=host.docker.internal # - - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.23-nikolaik} + - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.24-nikolaik} - SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} - WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace} ports: diff --git a/docker-compose.yml b/docker-compose.yml index f20945ecdeaa..4353b7b6bb5b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,7 +7,7 @@ services: image: openhands:latest container_name: openhands-app-${DATE:-} environment: - - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik} + - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik} #- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of openhands-state for this user - WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace} ports: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md index 89fdbc9e4124..6a666e91f8d3 100644 --- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md @@ -52,7 +52,7 @@ LLM_API_KEY="sk_test_12345" ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -61,7 +61,7 @@ docker run -it \ -v /var/run/docker.sock:/var/run/docker.sock \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - 
docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.cli ``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md index f50bce6a2fdd..a72cd57f0cc1 100644 --- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md @@ -46,7 +46,7 @@ LLM_API_KEY="sk_test_12345" ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -56,6 +56,6 @@ docker run -it \ -v /var/run/docker.sock:/var/run/docker.sock \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.main -t "write a bash script that prints hi" --no-auto-continue ``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx index 2d92e0d202a3..6a1789214923 100644 --- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/installation.mdx @@ -13,16 +13,16 @@ La façon la plus simple d'exécuter OpenHands est avec Docker. ```bash -docker pull docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik +docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik docker run -it --rm --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e LOG_ALL_EVENTS=true \ -v /var/run/docker.sock:/var/run/docker.sock \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ --name openhands-app \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 + docker.all-hands.dev/all-hands-ai/openhands:0.24 ``` Vous pouvez également exécuter OpenHands en mode [headless scriptable](https://docs.all-hands.dev/modules/usage/how-to/headless-mode), en tant que [CLI interactive](https://docs.all-hands.dev/modules/usage/how-to/cli-mode), ou en utilisant l'[Action GitHub OpenHands](https://docs.all-hands.dev/modules/usage/how-to/github-action). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md index 03c172540daf..865489d34841 100644 --- a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/runtimes.md @@ -13,7 +13,7 @@ C'est le Runtime par défaut qui est utilisé lorsque vous démarrez OpenHands. ``` docker run # ... - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -v /var/run/docker.sock:/var/run/docker.sock \ # ... 
``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md index 92b3e07891fe..57b95b719570 100644 --- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/cli-mode.md @@ -50,7 +50,7 @@ LLM_API_KEY="sk_test_12345" ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -59,7 +59,7 @@ docker run -it \ -v /var/run/docker.sock:/var/run/docker.sock \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.cli ``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md index a5909345ffa1..44a4b5bc6f63 100644 --- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/how-to/headless-mode.md @@ -47,7 +47,7 @@ LLM_API_KEY="sk_test_12345" ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -57,6 +57,6 @@ docker run -it \ -v /var/run/docker.sock:/var/run/docker.sock \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.main -t "write a bash script that prints hi" --no-auto-continue ``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx index 4dde1f31a525..2d20773af4bc 100644 --- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/installation.mdx @@ -11,16 +11,16 @@ 在 Docker 中运行 OpenHands 是最简单的方式。 ```bash -docker pull docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik +docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik docker run -it --rm --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e LOG_ALL_EVENTS=true \ -v /var/run/docker.sock:/var/run/docker.sock \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ --name openhands-app \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 + docker.all-hands.dev/all-hands-ai/openhands:0.24 ``` 你也可以在可脚本化的[无头模式](https://docs.all-hands.dev/modules/usage/how-to/headless-mode)下运行 OpenHands,作为[交互式 CLI](https://docs.all-hands.dev/modules/usage/how-to/cli-mode),或使用 [OpenHands GitHub 
Action](https://docs.all-hands.dev/modules/usage/how-to/github-action)。 diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md index 4f51c50ff69c..5786ce571c81 100644 --- a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/runtimes.md @@ -11,7 +11,7 @@ ``` docker run # ... - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -v /var/run/docker.sock:/var/run/docker.sock \ # ... ``` diff --git a/docs/modules/usage/how-to/cli-mode.md b/docs/modules/usage/how-to/cli-mode.md index d3dfb6d05173..612f1590eac9 100644 --- a/docs/modules/usage/how-to/cli-mode.md +++ b/docs/modules/usage/how-to/cli-mode.md @@ -35,7 +35,7 @@ To run OpenHands in CLI mode with Docker: ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -45,7 +45,7 @@ docker run -it \ -v ~/.openhands-state:/.openhands-state \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.cli ``` diff --git a/docs/modules/usage/how-to/headless-mode.md b/docs/modules/usage/how-to/headless-mode.md index 1bcb5c71ff13..b751dc3000d1 100644 --- a/docs/modules/usage/how-to/headless-mode.md +++ b/docs/modules/usage/how-to/headless-mode.md @@ -32,7 +32,7 @@ To run OpenHands in Headless mode with Docker: ```bash docker run -it \ --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e SANDBOX_USER_ID=$(id -u) \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -e LLM_API_KEY=$LLM_API_KEY \ @@ -43,7 +43,7 @@ docker run -it \ -v ~/.openhands-state:/.openhands-state \ --add-host host.docker.internal:host-gateway \ --name openhands-app-$(date +%Y%m%d%H%M%S) \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 \ + docker.all-hands.dev/all-hands-ai/openhands:0.24 \ python -m openhands.core.main -t "write a bash script that prints hi" ``` diff --git a/docs/modules/usage/installation.mdx b/docs/modules/usage/installation.mdx index b088f9579ca0..6a65befc38f6 100644 --- a/docs/modules/usage/installation.mdx +++ b/docs/modules/usage/installation.mdx @@ -54,17 +54,17 @@ The easiest way to run OpenHands is in Docker. 
```bash -docker pull docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik +docker pull docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik docker run -it --rm --pull=always \ - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -e LOG_ALL_EVENTS=true \ -v /var/run/docker.sock:/var/run/docker.sock \ -v ~/.openhands-state:/.openhands-state \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ --name openhands-app \ - docker.all-hands.dev/all-hands-ai/openhands:0.23 + docker.all-hands.dev/all-hands-ai/openhands:0.24 ``` You'll find OpenHands running at http://localhost:3000! diff --git a/docs/modules/usage/runtimes.md b/docs/modules/usage/runtimes.md index 9205879a1b9a..740a53b00482 100644 --- a/docs/modules/usage/runtimes.md +++ b/docs/modules/usage/runtimes.md @@ -16,7 +16,7 @@ some flags being passed to `docker run` that make this possible: ``` docker run # ... - -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.23-nikolaik \ + -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.24-nikolaik \ -v /var/run/docker.sock:/var/run/docker.sock \ # ... ``` diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 43278fa1cea1..1712456e836f 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,12 +1,12 @@ { "name": "openhands-frontend", - "version": "0.23.0", + "version": "0.24.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "openhands-frontend", - "version": "0.23.0", + "version": "0.24.0", "dependencies": { "@heroui/react": "2.6.14", "@monaco-editor/react": "^4.7.0-rc.0", diff --git a/frontend/package.json b/frontend/package.json index d776fc5dab25..966f1dbab164 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "openhands-frontend", - "version": "0.23.0", + "version": "0.24.0", "private": true, "type": "module", "engines": { diff --git a/pyproject.toml b/pyproject.toml index 70e5836cc4a1..755f75981dbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "openhands-ai" -version = "0.23.0" +version = "0.24.0" description = "OpenHands: Code Less, Make More" authors = ["OpenHands"] license = "MIT" @@ -105,7 +105,6 @@ reportlab = "*" [tool.coverage.run] concurrency = ["gevent"] - [tool.poetry.group.runtime.dependencies] jupyterlab = "*" notebook = "*" @@ -134,7 +133,6 @@ ignore = ["D1"] [tool.ruff.lint.pydocstyle] convention = "google" - [tool.poetry.group.evaluation.dependencies] streamlit = "*" whatthepatch = "*" From d46d99a35e37ae08ee68ffdf045caea30f020c07 Mon Sep 17 00:00:00 2001 From: tofarr Date: Thu, 13 Feb 2025 16:39:22 +0000 Subject: [PATCH 05/44] More effective remote runtime identification (#6714) --- openhands/runtime/impl/remote/remote_runtime.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openhands/runtime/impl/remote/remote_runtime.py b/openhands/runtime/impl/remote/remote_runtime.py index 56b0ec28caa8..0340f7d0a088 100644 --- a/openhands/runtime/impl/remote/remote_runtime.py +++ b/openhands/runtime/impl/remote/remote_runtime.py @@ -308,7 +308,7 @@ def _wait_until_alive_impl(self): self.log('debug', f'Waiting for runtime to be alive at url: {self.runtime_url}') with self._send_runtime_api_request( 'GET', - f'{self.config.sandbox.remote_runtime_api_url}/sessions/{self.sid}', + 
f'{self.config.sandbox.remote_runtime_api_url}/runtime/{self.runtime_id}', ) as runtime_info_response: runtime_data = runtime_info_response.json() assert 'runtime_id' in runtime_data From 341b695ad3029edae2d3650a34305ab2ee117bec Mon Sep 17 00:00:00 2001 From: Calvin Smith Date: Thu, 13 Feb 2025 11:50:05 -0700 Subject: [PATCH 06/44] fix: Filter `AgentCondensationObservation` events from agent state (#6705) Co-authored-by: Calvin Smith --- openhands/controller/agent_controller.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openhands/controller/agent_controller.py b/openhands/controller/agent_controller.py index 7189371f0bc3..e5a0b24f9694 100644 --- a/openhands/controller/agent_controller.py +++ b/openhands/controller/agent_controller.py @@ -77,6 +77,7 @@ class AgentController: NullObservation, ChangeAgentStateAction, AgentStateChangedObservation, + AgentCondensationObservation, ) def __init__( From b197e0af478549c2bc04b67df85f3112ccab4890 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:09:50 +0000 Subject: [PATCH 07/44] chore(deps): bump the version-all group across 1 directory with 5 updates (#6712) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: amanape <83104063+amanape@users.noreply.github.com> --- frontend/package-lock.json | 160 ++++++++++++++++++------------------- frontend/package.json | 8 +- 2 files changed, 84 insertions(+), 84 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 1712456e836f..0115a8c6934b 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -28,7 +28,7 @@ "isbot": "^5.1.22", "jose": "^5.9.4", "monaco-editor": "^0.52.2", - "posthog-js": "^1.216.0", + "posthog-js": "^1.217.2", "react": "^19.0.0", "react-dom": "^19.0.0", "react-highlight": "^0.15.0", @@ -58,7 +58,7 @@ "@testing-library/jest-dom": "^6.6.1", "@testing-library/react": "^16.2.0", "@testing-library/user-event": "^14.6.1", - "@types/node": "^22.13.1", + "@types/node": "^22.13.2", "@types/react": "^19.0.8", "@types/react-dom": "^19.0.3", "@types/react-highlight": "^0.12.8", @@ -82,8 +82,8 @@ "jsdom": "^26.0.0", "lint-staged": "^15.4.3", "msw": "^2.6.6", - "postcss": "^8.5.1", - "prettier": "^3.4.2", + "postcss": "^8.5.2", + "prettier": "^3.5.1", "tailwindcss": "^3.4.17", "typescript": "^5.7.3", "vite-plugin-svgr": "^4.2.0", @@ -95,9 +95,9 @@ } }, "node_modules/@adobe/css-tools": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.1.tgz", - "integrity": "sha512-12WGKBQzjUAI4ayyF4IAtfw2QR/IDoqk6jTddXDhtYTJF9ASmoE1zst7cVtP0aL/F1jUJL5r+JxKXKEgHNbEUQ==", + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.2.tgz", + "integrity": "sha512-baYZExFpsdkBNuvGKTKWCwKH57HRZLVtycZS05WTQNVOiXVSeAki3nU35zlRbToeMW8aHlJfyS+1C4BOv27q0A==", "dev": true, "license": "MIT" }, @@ -3519,24 +3519,21 @@ "license": "MIT" }, "node_modules/@monaco-editor/loader": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.4.0.tgz", - "integrity": "sha512-00ioBig0x642hytVspPl7DbQyaSWRaolYie/UFNjoTdvoKPzo6xrXLhTk9ixgIKcLH5b5vDOjVNiGyY+uDCUlg==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.5.0.tgz", + "integrity": "sha512-hKoGSM+7aAc7eRTRjpqAZucPmoNOC4UUbknb/VNoTkEIkCPhqV8LfbsgM1webRM7S/z21eHEx9Fkwx8Z/C/+Xw==", "license": "MIT", 
"dependencies": { "state-local": "^1.0.6" - }, - "peerDependencies": { - "monaco-editor": ">= 0.21.0 < 1" } }, "node_modules/@monaco-editor/react": { - "version": "4.7.0-rc.0", - "resolved": "https://registry.npmjs.org/@monaco-editor/react/-/react-4.7.0-rc.0.tgz", - "integrity": "sha512-YfjXkDK0bcwS0zo8PXptvQdCQfOPPtzGsAzmIv7PnoUGFdIohsR+NVDyjbajMddF+3cWUm/3q9NzP/DUke9a+w==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@monaco-editor/react/-/react-4.7.0.tgz", + "integrity": "sha512-cyzXQCtO47ydzxpQtCGSQGOC8Gk3ZUeBXFAxD+CWXYFo5OqZyZUonFl0DwUlTyAfRHntBfw2p3w4s9R6oe1eCA==", "license": "MIT", "dependencies": { - "@monaco-editor/loader": "^1.4.0" + "@monaco-editor/loader": "^1.5.0" }, "peerDependencies": { "monaco-editor": ">= 0.25.0 < 1", @@ -6430,9 +6427,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz", - "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==", + "version": "22.13.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.2.tgz", + "integrity": "sha512-Z+r8y3XL9ZpI2EY52YYygAFmo2/oWfNSj4BCpAXE2McAexDk8VcnBMGC9Djn9gTKt4d2T/hhXqmPzo4hfIXtTg==", "devOptional": true, "license": "MIT", "dependencies": { @@ -6713,16 +6710,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.23.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.23.0.tgz", - "integrity": "sha512-uB/+PSo6Exu02b5ZEiVtmY6RVYO7YU5xqgzTIVZwTHvvK3HsL8tZZHFaTLFtRG3CsV4A5mhOv+NZx5BlhXPyIA==", + "version": "8.24.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.24.0.tgz", + "integrity": "sha512-07rLuUBElvvEb1ICnafYWr4hk8/U7X9RDCOqd9JcAMtjh/9oRmcfN4yGzbPVirgMR0+HLVHehmu19CWeh7fsmQ==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.23.0", - "@typescript-eslint/types": "8.23.0", - "@typescript-eslint/typescript-estree": "8.23.0" + "@typescript-eslint/scope-manager": "8.24.0", + "@typescript-eslint/types": "8.24.0", + "@typescript-eslint/typescript-estree": "8.24.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -6737,14 +6734,14 @@ } }, "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { - "version": "8.23.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.23.0.tgz", - "integrity": "sha512-OGqo7+dXHqI7Hfm+WqkZjKjsiRtFUQHPdGMXzk5mYXhJUedO7e/Y7i8AK3MyLMgZR93TX4bIzYrfyVjLC+0VSw==", + "version": "8.24.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.24.0.tgz", + "integrity": "sha512-HZIX0UByphEtdVBKaQBgTDdn9z16l4aTUz8e8zPQnyxwHBtf5vtl1L+OhH+m1FGV9DrRmoDuYKqzVrvWDcDozw==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.23.0", - "@typescript-eslint/visitor-keys": "8.23.0" + "@typescript-eslint/types": "8.24.0", + "@typescript-eslint/visitor-keys": "8.24.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -6755,9 +6752,9 @@ } }, "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "8.23.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.23.0.tgz", - "integrity": "sha512-1sK4ILJbCmZOTt9k4vkoulT6/y5CHJ1qUYxqpF1K/DBAd8+ZUL4LlSCxOssuH5m4rUaaN0uS0HlVPvd45zjduQ==", + "version": "8.24.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-8.24.0.tgz", + "integrity": "sha512-VacJCBTyje7HGAw7xp11q439A+zeGG0p0/p2zsZwpnMzjPB5WteaWqt4g2iysgGFafrqvyLWqq6ZPZAOCoefCw==", "dev": true, "license": "MIT", "engines": { @@ -6769,14 +6766,14 @@ } }, "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "8.23.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.23.0.tgz", - "integrity": "sha512-LcqzfipsB8RTvH8FX24W4UUFk1bl+0yTOf9ZA08XngFwMg4Kj8A+9hwz8Cr/ZS4KwHrmo9PJiLZkOt49vPnuvQ==", + "version": "8.24.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.24.0.tgz", + "integrity": "sha512-ITjYcP0+8kbsvT9bysygfIfb+hBj6koDsu37JZG7xrCiy3fPJyNmfVtaGsgTUSEuTzcvME5YI5uyL5LD1EV5ZQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.23.0", - "@typescript-eslint/visitor-keys": "8.23.0", + "@typescript-eslint/types": "8.24.0", + "@typescript-eslint/visitor-keys": "8.24.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -6796,13 +6793,13 @@ } }, "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "8.23.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.23.0.tgz", - "integrity": "sha512-oWWhcWDLwDfu++BGTZcmXWqpwtkwb5o7fxUIGksMQQDSdPW9prsSnfIOZMlsj4vBOSrcnjIUZMiIjODgGosFhQ==", + "version": "8.24.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.24.0.tgz", + "integrity": "sha512-kArLq83QxGLbuHrTMoOEWO+l2MwsNS2TGISEdx8xgqpkbytB07XmlQyQdNDrCc1ecSqx0cnmhGvpX+VBwqqSkg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.23.0", + "@typescript-eslint/types": "8.24.0", "eslint-visitor-keys": "^4.2.0" }, "engines": { @@ -6996,9 +6993,9 @@ } }, "node_modules/@vitest/runner/node_modules/pathe": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.2.tgz", - "integrity": "sha512-15Ztpk+nov8DR524R4BF7uEuzESgzUEAV4Ah7CUMNGXdE5ELuvxElxGXndBl32vMSsWa1jpNf22Z+Er3sKwq+w==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", "dev": true, "license": "MIT" }, @@ -7018,9 +7015,9 @@ } }, "node_modules/@vitest/snapshot/node_modules/pathe": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.2.tgz", - "integrity": "sha512-15Ztpk+nov8DR524R4BF7uEuzESgzUEAV4Ah7CUMNGXdE5ELuvxElxGXndBl32vMSsWa1jpNf22Z+Er3sKwq+w==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", "dev": true, "license": "MIT" }, @@ -7718,9 +7715,9 @@ } }, "node_modules/call-bind-apply-helpers": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz", - "integrity": "sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "license": "MIT", "dependencies": { "es-errors": 
"^1.3.0", @@ -8178,9 +8175,9 @@ } }, "node_modules/compression": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.5.tgz", - "integrity": "sha512-bQJ0YRck5ak3LgtnpKkiabX5pNF7tMUh1BSy2ZBOTh0Dim0BUu6aPPwByIns6/A5Prh8PufSPerMDUklpzes2Q==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.0.tgz", + "integrity": "sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA==", "license": "MIT", "dependencies": { "bytes": "3.1.2", @@ -8756,9 +8753,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.96", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.96.tgz", - "integrity": "sha512-8AJUW6dh75Fm/ny8+kZKJzI1pgoE8bKLZlzDU2W1ENd+DXKJrx7I7l9hb8UWR4ojlnb5OlixMt00QWiYJoVw1w==", + "version": "1.5.98", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.98.tgz", + "integrity": "sha512-bI/LbtRBxU2GzK7KK5xxFd2y9Lf9XguHooPYbcXWy6wUoT8NMnffsvRhPmSeUHLSDKAEtKuTaEtK4Ms15zkIEA==", "license": "ISC" }, "node_modules/emoji-regex": { @@ -9044,13 +9041,16 @@ } }, "node_modules/es-shim-unscopables": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", - "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", "dev": true, "license": "MIT", "dependencies": { - "hasown": "^2.0.0" + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/es-to-primitive": { @@ -10128,9 +10128,9 @@ } }, "node_modules/for-each": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.4.tgz", - "integrity": "sha512-kKaIINnFpzW6ffJNDjjyjrk21BkDx38c0xa/klsT8VzLCaMEefv4ZTacrcVR4DmgTeBra++jMDAfS/tS799YDw==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", "dev": true, "license": "MIT", "dependencies": { @@ -14385,9 +14385,9 @@ } }, "node_modules/postcss": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz", - "integrity": "sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ==", + "version": "8.5.2", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.2.tgz", + "integrity": "sha512-MjOadfU3Ys9KYoX0AdkBlFEF1Vx37uCCeN4ZHnmwm9FfpbsGWMZeBLMmmpY+6Ocqod7mkdZ0DT31OlbsFrLlkA==", "funding": [ { "type": "opencollective", @@ -14542,9 +14542,9 @@ "license": "MIT" }, "node_modules/posthog-js": { - "version": "1.216.1", - "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.216.1.tgz", - "integrity": "sha512-ZJxSfA1caZHxVTHfL8rNqwGGUq1JEgKq2/dIwnJ9hHaiv6ALAbc8Rm7Zda8SpTqKk/pEcwrdwY3LYAj2FF3K8w==", + "version": "1.217.4", + "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.217.4.tgz", + "integrity": "sha512-ZIOb75F1pdMZl6e7C4mgH2accKArLA2RG3zMEjeils+3J/cylwgcr2Iw0QtzSLqQVvR7AFRRbXMZXUWsiB2zyA==", "license": "MIT", "dependencies": { "core-js": "^3.38.1", @@ -14580,9 +14580,9 @@ } }, "node_modules/prettier": { - "version": "3.5.0", - "resolved": 
"https://registry.npmjs.org/prettier/-/prettier-3.5.0.tgz", - "integrity": "sha512-quyMrVt6svPS7CjQ9gKb3GLEX/rl3BCL2oa/QkNcXv4YNVBC9olt3s+H7ukto06q7B1Qz46PbrKLO34PR6vXcA==", + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.1.tgz", + "integrity": "sha512-hPpFQvHwL3Qv5AdRvBFMhnKo4tYxp0ReXiPn2bxkiohEX6mBeBwEpBSQTkD458RaaDKQMYSp4hX4UtfUTA5wDw==", "dev": true, "license": "MIT", "bin": { @@ -17770,9 +17770,9 @@ } }, "node_modules/vitest/node_modules/pathe": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.2.tgz", - "integrity": "sha512-15Ztpk+nov8DR524R4BF7uEuzESgzUEAV4Ah7CUMNGXdE5ELuvxElxGXndBl32vMSsWa1jpNf22Z+Er3sKwq+w==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", "dev": true, "license": "MIT" }, @@ -17874,9 +17874,9 @@ } }, "node_modules/whatwg-url": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.1.0.tgz", - "integrity": "sha512-jlf/foYIKywAt3x/XWKZ/3rz8OSJPiWktjmk891alJUEjiVxKX9LEO92qH3hv4aJ0mN3MWPvGMCy8jQi95xK4w==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.1.1.tgz", + "integrity": "sha512-mDGf9diDad/giZ/Sm9Xi2YcyzaFpbdLpJPr+E9fSkyQ7KpQD4SdFcugkRQYzhmfI4KeV4Qpnn2sKPdo+kmsgRQ==", "dev": true, "license": "MIT", "dependencies": { diff --git a/frontend/package.json b/frontend/package.json index 966f1dbab164..2781b75eabbc 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -27,7 +27,7 @@ "isbot": "^5.1.22", "jose": "^5.9.4", "monaco-editor": "^0.52.2", - "posthog-js": "^1.216.0", + "posthog-js": "^1.217.2", "react": "^19.0.0", "react-dom": "^19.0.0", "react-highlight": "^0.15.0", @@ -85,7 +85,7 @@ "@testing-library/jest-dom": "^6.6.1", "@testing-library/react": "^16.2.0", "@testing-library/user-event": "^14.6.1", - "@types/node": "^22.13.1", + "@types/node": "^22.13.2", "@types/react": "^19.0.8", "@types/react-dom": "^19.0.3", "@types/react-highlight": "^0.12.8", @@ -109,8 +109,8 @@ "jsdom": "^26.0.0", "lint-staged": "^15.4.3", "msw": "^2.6.6", - "postcss": "^8.5.1", - "prettier": "^3.4.2", + "postcss": "^8.5.2", + "prettier": "^3.5.1", "tailwindcss": "^3.4.17", "typescript": "^5.7.3", "vite-plugin-svgr": "^4.2.0", From ef12bc5381d74dd53e9f20354264593716e91f8e Mon Sep 17 00:00:00 2001 From: Boxuan Li Date: Thu, 13 Feb 2025 12:05:03 -0800 Subject: [PATCH 08/44] Evaluation harness: Add agent config option (#6662) --- evaluation/benchmarks/gaia/run_infer.py | 14 ++++- .../benchmarks/gaia/scripts/run_infer.sh | 5 ++ .../benchmarks/the_agent_company/run_infer.py | 17 +++++- .../the_agent_company/scripts/run_infer.sh | 26 +++++--- evaluation/utils/shared.py | 4 ++ openhands/core/config/__init__.py | 2 + openhands/core/config/utils.py | 60 ++++++++++++++++++- tests/unit/test_arg_parser.py | 3 +- tests/unit/test_config.py | 30 ++++++++++ 9 files changed, 149 insertions(+), 12 deletions(-) diff --git a/evaluation/benchmarks/gaia/run_infer.py b/evaluation/benchmarks/gaia/run_infer.py index a8b442819267..2fdab0b2927a 100644 --- a/evaluation/benchmarks/gaia/run_infer.py +++ b/evaluation/benchmarks/gaia/run_infer.py @@ -25,6 +25,7 @@ get_llm_config_arg, get_parser, ) +from openhands.core.config.utils import get_agent_config_arg from openhands.core.logger import openhands_logger as logger from openhands.core.main import create_runtime, run_controller from 
openhands.events.action import AgentFinishAction, CmdRunAction, MessageAction @@ -63,8 +64,12 @@ def get_config( workspace_mount_path=None, ) config.set_llm_config(metadata.llm_config) - agent_config = config.get_agent_config(metadata.agent_class) - agent_config.enable_prompt_extensions = False + if metadata.agent_config: + config.set_agent_config(metadata.agent_config, metadata.agent_class) + else: + logger.info('Agent config not provided, using default settings') + agent_config = config.get_agent_config(metadata.agent_class) + agent_config.enable_prompt_extensions = False return config @@ -238,6 +243,10 @@ def process_instance( ) args, _ = parser.parse_known_args() + agent_config = None + if args.agent_config: + agent_config = get_agent_config_arg(args.agent_config) + llm_config = None if args.llm_config: llm_config = get_llm_config_arg(args.llm_config) @@ -256,6 +265,7 @@ def process_instance( eval_output_dir=args.eval_output_dir, data_split=args.data_split, details={'gaia-level': args.level}, + agent_config=agent_config, ) dataset = load_dataset('gaia-benchmark/GAIA', args.level) diff --git a/evaluation/benchmarks/gaia/scripts/run_infer.sh b/evaluation/benchmarks/gaia/scripts/run_infer.sh index 4b2f8f73dffa..217809880d40 100755 --- a/evaluation/benchmarks/gaia/scripts/run_infer.sh +++ b/evaluation/benchmarks/gaia/scripts/run_infer.sh @@ -9,6 +9,7 @@ AGENT=$3 EVAL_LIMIT=$4 LEVELS=$5 NUM_WORKERS=$6 +AGENT_CONFIG=$7 if [ -z "$NUM_WORKERS" ]; then NUM_WORKERS=1 @@ -49,5 +50,9 @@ if [ -n "$EVAL_LIMIT" ]; then COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT" fi +if [ -n "$AGENT_CONFIG" ]; then + echo "AGENT_CONFIG: $AGENT_CONFIG" + COMMAND="$COMMAND --agent-config $AGENT_CONFIG" + # Run the command eval $COMMAND diff --git a/evaluation/benchmarks/the_agent_company/run_infer.py b/evaluation/benchmarks/the_agent_company/run_infer.py index 5cd7c027e20f..cbfbb386fdde 100644 --- a/evaluation/benchmarks/the_agent_company/run_infer.py +++ b/evaluation/benchmarks/the_agent_company/run_infer.py @@ -18,9 +18,11 @@ AppConfig, LLMConfig, SandboxConfig, + get_agent_config_arg, get_llm_config_arg, get_parser, ) +from openhands.core.config.agent_config import AgentConfig from openhands.core.logger import openhands_logger as logger from openhands.core.main import create_runtime, run_controller from openhands.events.action import CmdRunAction, MessageAction @@ -34,6 +36,7 @@ def get_config( task_short_name: str, mount_path_on_host: str, llm_config: LLMConfig, + agent_config: AgentConfig, ) -> AppConfig: config = AppConfig( run_as_openhands=False, @@ -58,6 +61,14 @@ def get_config( workspace_mount_path_in_sandbox='/outputs', ) config.set_llm_config(llm_config) + if agent_config: + config.set_agent_config(agent_config) + else: + logger.info('Agent config not provided, using default settings') + agent_config = AgentConfig( + enable_prompt_extensions=False, + ) + config.set_agent_config(agent_config) return config @@ -215,6 +226,10 @@ def run_evaluator( ) args, _ = parser.parse_known_args() + agent_config: AgentConfig | None = None + if args.agent_config: + agent_config = get_agent_config_arg(args.agent_config) + agent_llm_config: LLMConfig | None = None if args.agent_llm_config: agent_llm_config = get_llm_config_arg(args.agent_llm_config) @@ -255,7 +270,7 @@ def run_evaluator( else: temp_dir = tempfile.mkdtemp() config: AppConfig = get_config( - args.task_image_name, task_short_name, temp_dir, agent_llm_config + args.task_image_name, task_short_name, temp_dir, agent_llm_config, agent_config ) runtime: Runtime = 
create_runtime(config) call_async_from_sync(runtime.connect) diff --git a/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh index b5bc7874c12e..3366c9826005 100755 --- a/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh +++ b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh @@ -44,6 +44,10 @@ while [[ $# -gt 0 ]]; do ENV_LLM_CONFIG="$2" shift 2 ;; + --agent-config) + AGENT_CONFIG="$2" + shift 2 + ;; --outputs-path) OUTPUTS_PATH="$2" shift 2 @@ -140,13 +144,21 @@ while IFS= read -r task_image; do continue fi - export PYTHONPATH=evaluation/benchmarks/the_agent_company:\$PYTHONPATH && \ - poetry run python run_infer.py \ - --agent-llm-config "$AGENT_LLM_CONFIG" \ - --env-llm-config "$ENV_LLM_CONFIG" \ - --outputs-path "$OUTPUTS_PATH" \ - --server-hostname "$SERVER_HOSTNAME" \ - --task-image-name "$task_image" + # Build the Python command + COMMAND="poetry run python run_infer.py \ + --agent-llm-config \"$AGENT_LLM_CONFIG\" \ + --env-llm-config \"$ENV_LLM_CONFIG\" \ + --outputs-path \"$OUTPUTS_PATH\" \ + --server-hostname \"$SERVER_HOSTNAME\" \ + --task-image-name \"$task_image\"" + + # Add agent-config if it's defined + if [ -n "$AGENT_CONFIG" ]; then + COMMAND="$COMMAND --agent-config $AGENT_CONFIG" + fi + + export PYTHONPATH=evaluation/benchmarks/the_agent_company:$PYTHONPATH && \ + eval "$COMMAND" # Prune unused images and volumes docker image rm "$task_image" diff --git a/evaluation/utils/shared.py b/evaluation/utils/shared.py index 0f8ac8fa8332..7035d56e41ef 100644 --- a/evaluation/utils/shared.py +++ b/evaluation/utils/shared.py @@ -17,6 +17,7 @@ from openhands.controller.state.state import State from openhands.core.config import LLMConfig +from openhands.core.config.agent_config import AgentConfig from openhands.core.config.condenser_config import ( CondenserConfig, NoOpCondenserConfig, @@ -43,6 +44,7 @@ class EvalMetadata(BaseModel): agent_class: str llm_config: LLMConfig + agent_config: AgentConfig | None = None max_iterations: int eval_output_dir: str start_time: str @@ -167,6 +169,7 @@ def make_metadata( eval_output_dir: str, data_split: str | None = None, details: dict[str, Any] | None = None, + agent_config: AgentConfig | None = None, condenser_config: CondenserConfig | None = None, ) -> EvalMetadata: model_name = llm_config.model.split('/')[-1] @@ -189,6 +192,7 @@ def make_metadata( metadata = EvalMetadata( agent_class=agent_class, llm_config=llm_config, + agent_config=agent_config, max_iterations=max_iterations, eval_output_dir=eval_output_path, start_time=time.strftime('%Y-%m-%d %H:%M:%S'), diff --git a/openhands/core/config/__init__.py b/openhands/core/config/__init__.py index 2e0f87e32143..d653f3e70ac4 100644 --- a/openhands/core/config/__init__.py +++ b/openhands/core/config/__init__.py @@ -10,6 +10,7 @@ from openhands.core.config.security_config import SecurityConfig from openhands.core.config.utils import ( finalize_config, + get_agent_config_arg, get_llm_config_arg, get_parser, load_app_config, @@ -31,6 +32,7 @@ 'load_from_env', 'load_from_toml', 'finalize_config', + 'get_agent_config_arg', 'get_llm_config_arg', 'get_field_info', 'get_parser', diff --git a/openhands/core/config/utils.py b/openhands/core/config/utils.py index b6bf4b77abad..e0b1ee71adc3 100644 --- a/openhands/core/config/utils.py +++ b/openhands/core/config/utils.py @@ -298,7 +298,59 @@ def finalize_config(cfg: AppConfig): ) -# Utility function for command line --group argument +def get_agent_config_arg( + 
agent_config_arg: str, toml_file: str = 'config.toml' +) -> AgentConfig | None: + """Get a group of agent settings from the config file. + + A group in config.toml can look like this: + + ``` + [agent.default] + enable_prompt_extensions = false + ``` + + The user-defined group name, like "default", is the argument to this function. The function will load the AgentConfig object + with the settings of this group, from the config file, and set it as the AgentConfig object for the app. + + Note that the group must be under "agent" group, or in other words, the group name must start with "agent.". + + Args: + agent_config_arg: The group of agent settings to get from the config.toml file. + toml_file: Path to the configuration file to read from. Defaults to 'config.toml'. + + Returns: + AgentConfig: The AgentConfig object with the settings from the config file. + """ + # keep only the name, just in case + agent_config_arg = agent_config_arg.strip('[]') + + # truncate the prefix, just in case + if agent_config_arg.startswith('agent.'): + agent_config_arg = agent_config_arg[6:] + + logger.openhands_logger.debug(f'Loading agent config from {agent_config_arg}') + + # load the toml file + try: + with open(toml_file, 'r', encoding='utf-8') as toml_contents: + toml_config = toml.load(toml_contents) + except FileNotFoundError as e: + logger.openhands_logger.error(f'Config file not found: {e}') + return None + except toml.TomlDecodeError as e: + logger.openhands_logger.error( + f'Cannot parse agent group from {agent_config_arg}. Exception: {e}' + ) + return None + + # update the agent config with the specified section + if 'agent' in toml_config and agent_config_arg in toml_config['agent']: + return AgentConfig(**toml_config['agent'][agent_config_arg]) + logger.openhands_logger.debug(f'Loading from toml failed for {agent_config_arg}') + return None + + def get_llm_config_arg( llm_config_arg: str, toml_file: str = 'config.toml' ) -> LLMConfig | None: @@ -443,6 +495,12 @@ def get_parser() -> argparse.ArgumentParser: type=str, help='Replace default LLM ([llm] section in config.toml) config with the specified LLM config, e.g. "llama3" for [llm.llama3] section in config.toml', ) + parser.add_argument( + '--agent-config', + default=None, + type=str, + help='Replace default Agent ([agent] section in config.toml) config with the specified Agent config, e.g. 
"CodeAct" for [agent.CodeAct] section in config.toml', + ) parser.add_argument( '-n', '--name', diff --git a/tests/unit/test_arg_parser.py b/tests/unit/test_arg_parser.py index 51c736f19c05..b71cd5e2c18b 100644 --- a/tests/unit/test_arg_parser.py +++ b/tests/unit/test_arg_parser.py @@ -128,6 +128,7 @@ def test_help_message(capsys): '--eval-note EVAL_NOTE', '--eval-ids EVAL_IDS', '-l LLM_CONFIG, --llm-config LLM_CONFIG', + '--agent-config AGENT_CONFIG', '-n NAME, --name NAME', '--config-file CONFIG_FILE', '--no-auto-continue', @@ -137,4 +138,4 @@ def test_help_message(capsys): assert element in help_output, f"Expected '{element}' to be in the help message" option_count = help_output.count(' -') - assert option_count == 17, f'Expected 17 options, found {option_count}' + assert option_count == 18, f'Expected 18 options, found {option_count}' diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 5edfd64cda90..7aab02c0e019 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -9,6 +9,7 @@ AppConfig, LLMConfig, finalize_config, + get_agent_config_arg, get_llm_config_arg, load_from_env, load_from_toml, @@ -781,3 +782,32 @@ def test_get_agent_configs(default_config, temp_toml_file): assert codeact_config.memory_enabled is True browsing_config = default_config.get_agent_configs().get('BrowsingAgent') assert browsing_config.memory_max_threads == 10 + + +def test_get_agent_config_arg(temp_toml_file): + temp_toml = """ +[core] +max_iterations = 100 +max_budget_per_task = 4.0 + +[agent.CodeActAgent] +memory_enabled = true +enable_prompt_extensions = false + +[agent.BrowsingAgent] +memory_enabled = false +enable_prompt_extensions = true +memory_max_threads = 10 +""" + + with open(temp_toml_file, 'w') as f: + f.write(temp_toml) + + agent_config = get_agent_config_arg('CodeActAgent', temp_toml_file) + assert agent_config.memory_enabled + assert not agent_config.enable_prompt_extensions + + agent_config2 = get_agent_config_arg('BrowsingAgent', temp_toml_file) + assert not agent_config2.memory_enabled + assert agent_config2.enable_prompt_extensions + assert agent_config2.memory_max_threads == 10 From f5fccab1f60f70278902b9845bdee4d8a3e3b471 Mon Sep 17 00:00:00 2001 From: wtiger9218 Date: Thu, 13 Feb 2025 18:21:23 -0500 Subject: [PATCH 09/44] feat(resolver): implement gitlab resolver (#6458) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: José Luis Di Biase Co-authored-by: José Luis Di Biase Co-authored-by: Oriana Co-authored-by: Charlie Co-authored-by: Juan Manuel Daza <61162223+juanmanueldaza@users.noreply.github.com> Co-authored-by: Juan Manuel Daza Co-authored-by: Cody Kociemba Co-authored-by: Rohit Malhotra Co-authored-by: Engel Nyst --- .github/workflows/openhands-resolver.yml | 4 +- frontend/vite.config.ts | 3 + openhands/resolver/README.md | 41 +- openhands/resolver/github_issue.py | 21 - openhands/resolver/interfaces/github.py | 591 ++++++++ openhands/resolver/interfaces/gitlab.py | 577 +++++++ openhands/resolver/interfaces/issue.py | 123 ++ .../resolver/interfaces/issue_definitions.py | 400 +++++ openhands/resolver/issue_definitions.py | 806 ---------- openhands/resolver/resolve_all_issues.py | 50 +- openhands/resolver/resolve_issue.py | 124 +- openhands/resolver/resolver_output.py | 4 +- openhands/resolver/send_pull_request.py | 325 ++-- openhands/resolver/utils.py | 86 ++ .../{ => github}/test_guess_success.py | 28 +- .../{ => github}/test_issue_handler.py | 43 +- 
.../test_issue_handler_error_handling.py | 44 +- .../test_pr_handler_guess_success.py | 57 +- .../{ => github}/test_pr_title_escaping.py | 16 +- .../{ => github}/test_resolve_issues.py | 95 +- .../{ => github}/test_send_pull_request.py | 236 +-- .../gitlab/test_gitlab_guess_success.py | 202 +++ .../gitlab/test_gitlab_issue_handler.py | 683 +++++++++ ...est_gitlab_issue_handler_error_handling.py | 283 ++++ .../test_gitlab_pr_handler_guess_success.py | 672 +++++++++ .../gitlab/test_gitlab_pr_title_escaping.py | 167 +++ .../gitlab/test_gitlab_resolve_issues.py | 923 ++++++++++++ .../gitlab/test_gitlab_send_pull_request.py | 1335 +++++++++++++++++ tests/unit/resolver/test_issue_references.py | 44 +- 29 files changed, 6685 insertions(+), 1298 deletions(-) delete mode 100644 openhands/resolver/github_issue.py create mode 100644 openhands/resolver/interfaces/github.py create mode 100644 openhands/resolver/interfaces/gitlab.py create mode 100644 openhands/resolver/interfaces/issue.py create mode 100644 openhands/resolver/interfaces/issue_definitions.py delete mode 100644 openhands/resolver/issue_definitions.py rename tests/unit/resolver/{ => github}/test_guess_success.py (88%) rename tests/unit/resolver/{ => github}/test_issue_handler.py (94%) rename tests/unit/resolver/{ => github}/test_issue_handler_error_handling.py (86%) rename tests/unit/resolver/{ => github}/test_pr_handler_guess_success.py (92%) rename tests/unit/resolver/{ => github}/test_pr_title_escaping.py (93%) rename tests/unit/resolver/{ => github}/test_resolve_issues.py (93%) rename tests/unit/resolver/{ => github}/test_send_pull_request.py (89%) create mode 100644 tests/unit/resolver/gitlab/test_gitlab_guess_success.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_issue_handler.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_issue_handler_error_handling.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_pr_handler_guess_success.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_pr_title_escaping.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_resolve_issues.py create mode 100644 tests/unit/resolver/gitlab/test_gitlab_send_pull_request.py diff --git a/.github/workflows/openhands-resolver.yml b/.github/workflows/openhands-resolver.yml index dbfd678fc162..863896cf9213 100644 --- a/.github/workflows/openhands-resolver.yml +++ b/.github/workflows/openhands-resolver.yml @@ -231,7 +231,7 @@ jobs: - name: Attempt to resolve issue env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} - GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} @@ -267,7 +267,7 @@ jobs: if: always() # Create PR or branch even if the previous steps fail env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} - GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index c83d8af4f7f9..c202de0faff6 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -51,6 +51,9 @@ export default defineConfig(({ mode }) => { // rewriteWsOrigin: true, }, }, + watch: { + ignored: ['**/node_modules/**', '**/.git/**'], + 
}, }, ssr: { noExternal: ["react-syntax-highlighter"], diff --git a/openhands/resolver/README.md b/openhands/resolver/README.md index eab4af667e3f..01455e72aa30 100644 --- a/openhands/resolver/README.md +++ b/openhands/resolver/README.md @@ -1,4 +1,4 @@ -# OpenHands Github Issue Resolver 🙌 +# OpenHands Github & Gitlab Issue Resolver 🙌 Need help resolving a GitHub issue but don't have the time to do it yourself? Let an AI agent help you out! @@ -74,14 +74,24 @@ If you prefer to run the resolver programmatically instead of using GitHub Actio pip install openhands-ai ``` -2. Create a GitHub access token: - - Visit [GitHub's token settings](https://github.com/settings/personal-access-tokens/new) - - Create a fine-grained token with these scopes: - - "Content" - - "Pull requests" - - "Issues" - - "Workflows" - - If you don't have push access to the target repo, you can fork it first +2. Create a GitHub or GitLab access token: + - Create a GitHub acces token + - Visit [GitHub's token settings](https://github.com/settings/personal-access-tokens/new) + - Create a fine-grained token with these scopes: + - "Content" + - "Pull requests" + - "Issues" + - "Workflows" + - If you don't have push access to the target repo, you can fork it first + + - Create a GitLab acces token + - Visit [GitLab's token settings](https://gitlab.com/-/user_settings/personal_access_tokens) + - Create a fine-grained token with these scopes: + - 'api' + - 'read_api' + - 'read_user' + - 'read_repository' + - 'write_repository' 3. Set up environment variables: @@ -90,7 +100,12 @@ pip install openhands-ai # GitHub credentials export GITHUB_TOKEN="your-github-token" -export GITHUB_USERNAME="your-github-username" # Optional, defaults to token owner +export GIT_USERNAME="your-github-username" # Optional, defaults to token owner + +# GitLab credentials if you're using GitLab repo + +export GITLAB_TOKEN="your-gitlab-token" +export GIT_USERNAME="your-gitlab-username" # Optional, defaults to token owner # LLM configuration @@ -169,13 +184,13 @@ There are three ways you can upload: 3. `ready` - create a non-draft PR that's ready for review ```bash -python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --github-username YOUR_GITHUB_USERNAME --pr-type draft +python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --username YOUR_GITHUB_OR_GITLAB_USERNAME --pr-type draft ``` If you want to upload to a fork, you can do so by specifying the `fork-owner`: ```bash -python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --github-username YOUR_GITHUB_USERNAME --pr-type draft --fork-owner YOUR_GITHUB_USERNAME +python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --username YOUR_GITHUB_OR_GITLAB_USERNAME --pr-type draft --fork-owner YOUR_GITHUB_OR_GITLAB_USERNAME ``` ## Providing Custom Instructions @@ -184,5 +199,5 @@ You can customize how the AI agent approaches issue resolution by adding a `.ope ## Troubleshooting -If you have any issues, please open an issue on this github repo, we're happy to help! +If you have any issues, please open an issue on this github or gitlab repo, we're happy to help! Alternatively, you can [email us](mailto:contact@all-hands.dev) or join the OpenHands Slack workspace (see [the README](/README.md) for an invite link). 
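For orientation before the interface diffs that follow: this patch splits the resolver into provider-specific handlers (`GithubIssueHandler`, `GitlabIssueHandler`) behind a shared `IssueHandlerInterface`, wrapped by `ServiceContextIssue` / `ServiceContextPR`. The sketch below is a rough, hedged illustration only, not code from the patch itself; the class names and constructor arguments are taken from the new files under `openhands/resolver/interfaces/` shown later in this patch, while the repository path, token, and issue number are placeholders.

```python
# Minimal sketch of driving the new handler abstraction for a GitLab project.
# GitlabIssueHandler and ServiceContextIssue come from the files added by this
# patch (openhands/resolver/interfaces/gitlab.py and issue_definitions.py).
# Owner/repo/token/issue number below are placeholders, not real values.
from openhands.resolver.interfaces.gitlab import GitlabIssueHandler
from openhands.resolver.interfaces.issue_definitions import ServiceContextIssue

handler = GitlabIssueHandler(
    owner='my-group',              # GitLab namespace (placeholder)
    repo='my-repo',                # project name (placeholder)
    token='YOUR_GITLAB_TOKEN',
    username='YOUR_GIT_USERNAME',
)

# llm_config=None skips the LLM-backed success check; pass an LLMConfig to enable it.
issue_handler = ServiceContextIssue(handler, llm_config=None)

# Fetch issue 42 (plus its thread comments) as the provider-neutral Issue model.
issues = issue_handler.get_converted_issues(issue_numbers=[42])
for issue in issues:
    print(issue.number, issue.title)
```

The intent of the split, as far as the diffs show, is that `resolve_issue.py` and `send_pull_request.py` can talk to GitHub or GitLab through the same interface; only the handler that gets constructed differs between providers.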
diff --git a/openhands/resolver/github_issue.py b/openhands/resolver/github_issue.py deleted file mode 100644 index d7d7974d3fdf..000000000000 --- a/openhands/resolver/github_issue.py +++ /dev/null @@ -1,21 +0,0 @@ -from pydantic import BaseModel - - -class ReviewThread(BaseModel): - comment: str - files: list[str] - - -class GithubIssue(BaseModel): - owner: str - repo: str - number: int - title: str - body: str - thread_comments: list[str] | None = None # Added field for issue thread comments - closing_issues: list[str] | None = None - review_comments: list[str] | None = None - review_threads: list[ReviewThread] | None = None - thread_ids: list[str] | None = None - head_branch: str | None = None - base_branch: str | None = None diff --git a/openhands/resolver/interfaces/github.py b/openhands/resolver/interfaces/github.py new file mode 100644 index 000000000000..46cceb68a4f7 --- /dev/null +++ b/openhands/resolver/interfaces/github.py @@ -0,0 +1,591 @@ +from typing import Any + +import requests + +from openhands.core.logger import openhands_logger as logger +from openhands.resolver.interfaces.issue import ( + Issue, + IssueHandlerInterface, + ReviewThread, +) +from openhands.resolver.utils import extract_issue_references + + +class GithubIssueHandler(IssueHandlerInterface): + def __init__(self, owner: str, repo: str, token: str, username: str | None = None): + self.owner = owner + self.repo = repo + self.token = token + self.username = username + self.base_url = self.get_base_url() + self.download_url = self.get_download_url() + self.clone_url = self.get_clone_url() + self.headers = self.get_headers() + + def set_owner(self, owner: str): + self.owner = owner + + def get_headers(self): + return { + 'Authorization': f'token {self.token}', + 'Accept': 'application/vnd.github.v3+json', + } + + def get_base_url(self): + return f'https://api.github.com/repos/{self.owner}/{self.repo}' + + def get_authorize_url(self): + return f'https://{self.username}:{self.token}@github.com/' + + def get_branch_url(self, branch_name: str): + return self.get_base_url() + f'/branches/{branch_name}' + + def get_download_url(self): + return f'{self.base_url}/issues' + + def get_clone_url(self): + username_and_token = ( + f'{self.username}:{self.token}' + if self.username + else f'x-auth-token:{self.token}' + ) + return f'https://{username_and_token}@github.com/{self.owner}/{self.repo}.git' + + def get_graphql_url(self): + return 'https://api.github.com/graphql' + + def get_compare_url(self, branch_name: str): + return f'https://github.com/{self.owner}/{self.repo}/compare/{branch_name}?expand=1' + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + """Download issues from Github. + + Args: + issue_numbers: The numbers of the issues to download + comment_id: The ID of a single comment, if provided, otherwise all comments + + Returns: + List of Github issues. 
+ """ + + if not issue_numbers: + raise ValueError('Unspecified issue number') + + all_issues = self.download_issues() + logger.info(f'Limiting resolving to issues {issue_numbers}.') + all_issues = [ + issue + for issue in all_issues + if issue['number'] in issue_numbers and 'pull_request' not in issue + ] + + if len(issue_numbers) == 1 and not all_issues: + raise ValueError(f'Issue {issue_numbers[0]} not found') + + converted_issues = [] + for issue in all_issues: + # Check for required fields (number and title) + if any([issue.get(key) is None for key in ['number', 'title']]): + logger.warning( + f'Skipping issue {issue} as it is missing number or title.' + ) + continue + + # Handle empty body by using empty string + if issue.get('body') is None: + issue['body'] = '' + + # Get issue thread comments + thread_comments = self.get_issue_comments( + issue['number'], comment_id=comment_id + ) + # Convert empty lists to None for optional fields + issue_details = Issue( + owner=self.owner, + repo=self.repo, + number=issue['number'], + title=issue['title'], + body=issue['body'], + thread_comments=thread_comments, + review_comments=None, # Initialize review comments as None for regular issues + ) + + converted_issues.append(issue_details) + + return converted_issues + + def download_issues(self) -> list[Any]: + params: dict[str, int | str] = {'state': 'open', 'per_page': 100, 'page': 1} + all_issues = [] + + while True: + response = requests.get( + self.download_url, headers=self.headers, params=params + ) + response.raise_for_status() + issues = response.json() + + if not issues: + break + + if not isinstance(issues, list) or any( + [not isinstance(issue, dict) for issue in issues] + ): + raise ValueError( + 'Expected list of dictionaries from Service Github API.' + ) + + all_issues.extend(issues) + assert isinstance(params['page'], int) + params['page'] += 1 + + return all_issues + + def get_issue_comments( + self, issue_number: int, comment_id: int | None = None + ) -> list[str] | None: + """Download comments for a specific issue from Github.""" + url = f'{self.download_url}/{issue_number}/comments' + params = {'per_page': 100, 'page': 1} + all_comments = [] + + while True: + response = requests.get(url, headers=self.headers, params=params) + response.raise_for_status() + comments = response.json() + + if not comments: + break + + if comment_id: + matching_comment = next( + ( + comment['body'] + for comment in comments + if comment['id'] == comment_id + ), + None, + ) + if matching_comment: + return [matching_comment] + else: + all_comments.extend([comment['body'] for comment in comments]) + + params['page'] += 1 + + return all_comments if all_comments else None + + def branch_exists(self, branch_name: str) -> bool: + print(f'Checking if branch {branch_name} exists...') + response = requests.get( + f'{self.base_url}/branches/{branch_name}', headers=self.headers + ) + exists = response.status_code == 200 + print(f'Branch {branch_name} exists: {exists}') + return exists + + def get_branch_name(self, base_branch_name: str): + branch_name = base_branch_name + attempt = 1 + while self.branch_exists(branch_name): + attempt += 1 + branch_name = f'{base_branch_name}-try{attempt}' + return branch_name + + def reply_to_comment(self, pr_number: int, comment_id: str, reply: str): + # Opting for graphql as REST API doesn't allow reply to replies in comment threads + query = """ + mutation($body: String!, $pullRequestReviewThreadId: ID!) 
{ + addPullRequestReviewThreadReply(input: { body: $body, pullRequestReviewThreadId: $pullRequestReviewThreadId }) { + comment { + id + body + createdAt + } + } + } + """ + + comment_reply = f'Openhands fix success summary\n\n\n{reply}' + variables = {'body': comment_reply, 'pullRequestReviewThreadId': comment_id} + url = self.get_graphql_url() + headers = { + 'Authorization': f'Bearer {self.token}', + 'Content-Type': 'application/json', + } + + response = requests.post( + url, json={'query': query, 'variables': variables}, headers=headers + ) + response.raise_for_status() + + def get_pull_url(self, pr_number: int): + return f'https://github.com/{self.owner}/{self.repo}/pull/{pr_number}' + + def get_default_branch_name(self) -> str: + response = requests.get(f'{self.base_url}', headers=self.headers) + response.raise_for_status() + return response.json()['default_branch'] + + def create_pull_request(self, data=dict) -> dict: + response = requests.post( + f'{self.base_url}/pulls', headers=self.headers, json=data + ) + if response.status_code == 403: + raise RuntimeError( + 'Failed to create pull request due to missing permissions. ' + 'Make sure that the provided token has push permissions for the repository.' + ) + response.raise_for_status() + pr_data = response.json() + return pr_data + + def request_reviewers(self, reviewer: str, pr_number: int): + review_data = {'reviewers': [reviewer]} + review_response = requests.post( + f'{self.base_url}/pulls/{pr_number}/requested_reviewers', + headers=self.headers, + json=review_data, + ) + if review_response.status_code != 201: + print( + f'Warning: Failed to request review from {reviewer}: {review_response.text}' + ) + + def send_comment_msg(self, issue_number: int, msg: str): + """Send a comment message to a GitHub issue or pull request. + + Args: + issue_number: The issue or pull request number + msg: The message content to post as a comment + """ + # Post a comment on the PR + comment_url = f'{self.base_url}/issues/{issue_number}/comments' + comment_data = {'body': msg} + comment_response = requests.post( + comment_url, headers=self.headers, json=comment_data + ) + if comment_response.status_code != 201: + print( + f'Failed to post comment: {comment_response.status_code} {comment_response.text}' + ) + else: + print(f'Comment added to the PR: {msg}') + + def get_context_from_external_issues_references( + self, + closing_issues: list[str], + closing_issue_numbers: list[int], + issue_body: str, + review_comments: list[str] | None, + review_threads: list[ReviewThread], + thread_comments: list[str] | None, + ): + pass + + +class GithubPRHandler(GithubIssueHandler): + def __init__(self, owner: str, repo: str, token: str, username: str | None = None): + super().__init__(owner, repo, token, username) + self.download_url = ( + f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls' + ) + + def download_pr_metadata( + self, pull_number: int, comment_id: int | None = None + ) -> tuple[list[str], list[int], list[str], list[ReviewThread], list[str]]: + """Run a GraphQL query against the GitHub API for information. + + Retrieves information about: + 1. unresolved review comments + 2. referenced issues the pull request would close + + Args: + pull_number: The number of the pull request to query. + comment_id: Optional ID of a specific comment to focus on. + query: The GraphQL query as a string. + variables: A dictionary of variables for the query. + token: Your GitHub personal access token. + + Returns: + The JSON response from the GitHub API. 
+ """ + # Using graphql as REST API doesn't indicate resolved status for review comments + # TODO: grabbing the first 10 issues, 100 review threads, and 100 coments; add pagination to retrieve all + query = """ + query($owner: String!, $repo: String!, $pr: Int!) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $pr) { + closingIssuesReferences(first: 10) { + edges { + node { + body + number + } + } + } + url + reviews(first: 100) { + nodes { + body + state + fullDatabaseId + } + } + reviewThreads(first: 100) { + edges{ + node{ + id + isResolved + comments(first: 100) { + totalCount + nodes { + body + path + fullDatabaseId + } + } + } + } + } + } + } + } + """ + + variables = {'owner': self.owner, 'repo': self.repo, 'pr': pull_number} + + url = 'https://api.github.com/graphql' + headers = { + 'Authorization': f'Bearer {self.token}', + 'Content-Type': 'application/json', + } + + response = requests.post( + url, json={'query': query, 'variables': variables}, headers=headers + ) + response.raise_for_status() + response_json = response.json() + + # Parse the response to get closing issue references and unresolved review comments + pr_data = ( + response_json.get('data', {}).get('repository', {}).get('pullRequest', {}) + ) + + # Get closing issues + closing_issues = pr_data.get('closingIssuesReferences', {}).get('edges', []) + closing_issues_bodies = [issue['node']['body'] for issue in closing_issues] + closing_issue_numbers = [ + issue['node']['number'] for issue in closing_issues + ] # Extract issue numbers + + # Get review comments + reviews = pr_data.get('reviews', {}).get('nodes', []) + if comment_id is not None: + reviews = [ + review + for review in reviews + if int(review['fullDatabaseId']) == comment_id + ] + review_bodies = [review['body'] for review in reviews] + + # Get unresolved review threads + review_threads = [] + thread_ids = [] # Store thread IDs; agent replies to the thread + raw_review_threads = pr_data.get('reviewThreads', {}).get('edges', []) + for thread in raw_review_threads: + node = thread.get('node', {}) + if not node.get( + 'isResolved', True + ): # Check if the review thread is unresolved + id = node.get('id') + thread_contains_comment_id = False + my_review_threads = node.get('comments', {}).get('nodes', []) + message = '' + files = [] + for i, review_thread in enumerate(my_review_threads): + if ( + comment_id is not None + and int(review_thread['fullDatabaseId']) == comment_id + ): + thread_contains_comment_id = True + + if ( + i == len(my_review_threads) - 1 + ): # Check if it's the last thread in the thread + if len(my_review_threads) > 1: + message += '---\n' # Add "---" before the last message if there's more than one thread + message += 'latest feedback:\n' + review_thread['body'] + '\n' + else: + message += ( + review_thread['body'] + '\n' + ) # Add each thread in a new line + + file = review_thread.get('path') + if file and file not in files: + files.append(file) + + if comment_id is None or thread_contains_comment_id: + unresolved_thread = ReviewThread(comment=message, files=files) + review_threads.append(unresolved_thread) + thread_ids.append(id) + + return ( + closing_issues_bodies, + closing_issue_numbers, + review_bodies, + review_threads, + thread_ids, + ) + + # Override processing of downloaded issues + def get_pr_comments( + self, pr_number: int, comment_id: int | None = None + ) -> list[str] | None: + """Download comments for a specific pull request from Github.""" + url = 
f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{pr_number}/comments' + headers = { + 'Authorization': f'token {self.token}', + 'Accept': 'application/vnd.github.v3+json', + } + params = {'per_page': 100, 'page': 1} + all_comments = [] + + while True: + response = requests.get(url, headers=headers, params=params) + response.raise_for_status() + comments = response.json() + + if not comments: + break + + if comment_id is not None: + matching_comment = next( + ( + comment['body'] + for comment in comments + if comment['id'] == comment_id + ), + None, + ) + if matching_comment: + return [matching_comment] + else: + all_comments.extend([comment['body'] for comment in comments]) + + params['page'] += 1 + + return all_comments if all_comments else None + + def get_context_from_external_issues_references( + self, + closing_issues: list[str], + closing_issue_numbers: list[int], + issue_body: str, + review_comments: list[str] | None, + review_threads: list[ReviewThread], + thread_comments: list[str] | None, + ): + new_issue_references = [] + + if issue_body: + new_issue_references.extend(extract_issue_references(issue_body)) + + if review_comments: + for comment in review_comments: + new_issue_references.extend(extract_issue_references(comment)) + + if review_threads: + for review_thread in review_threads: + new_issue_references.extend( + extract_issue_references(review_thread.comment) + ) + + if thread_comments: + for thread_comment in thread_comments: + new_issue_references.extend(extract_issue_references(thread_comment)) + + non_duplicate_references = set(new_issue_references) + unique_issue_references = non_duplicate_references.difference( + closing_issue_numbers + ) + + for issue_number in unique_issue_references: + try: + url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{issue_number}' + headers = { + 'Authorization': f'Bearer {self.token}', + 'Accept': 'application/vnd.github.v3+json', + } + response = requests.get(url, headers=headers) + response.raise_for_status() + issue_data = response.json() + issue_body = issue_data.get('body', '') + if issue_body: + closing_issues.append(issue_body) + except requests.exceptions.RequestException as e: + logger.warning(f'Failed to fetch issue {issue_number}: {str(e)}') + + return closing_issues + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + if not issue_numbers: + raise ValueError('Unspecified issue numbers') + + all_issues = self.download_issues() + logger.info(f'Limiting resolving to issues {issue_numbers}.') + all_issues = [issue for issue in all_issues if issue['number'] in issue_numbers] + + converted_issues = [] + for issue in all_issues: + # For PRs, body can be None + if any([issue.get(key) is None for key in ['number', 'title']]): + logger.warning(f'Skipping #{issue} as it is missing number or title.') + continue + + # Handle None body for PRs + body = issue.get('body') if issue.get('body') is not None else '' + ( + closing_issues, + closing_issues_numbers, + review_comments, + review_threads, + thread_ids, + ) = self.download_pr_metadata(issue['number'], comment_id=comment_id) + head_branch = issue['head']['ref'] + + # Get PR thread comments + thread_comments = self.get_pr_comments( + issue['number'], comment_id=comment_id + ) + + closing_issues = self.get_context_from_external_issues_references( + closing_issues, + closing_issues_numbers, + body, + review_comments, + review_threads, + thread_comments, + ) + + issue_details = 
Issue( + owner=self.owner, + repo=self.repo, + number=issue['number'], + title=issue['title'], + body=body, + closing_issues=closing_issues, + review_comments=review_comments, + review_threads=review_threads, + thread_ids=thread_ids, + head_branch=head_branch, + thread_comments=thread_comments, + ) + + converted_issues.append(issue_details) + + return converted_issues diff --git a/openhands/resolver/interfaces/gitlab.py b/openhands/resolver/interfaces/gitlab.py new file mode 100644 index 000000000000..0b2937170910 --- /dev/null +++ b/openhands/resolver/interfaces/gitlab.py @@ -0,0 +1,577 @@ +from typing import Any +from urllib.parse import quote + +import requests + +from openhands.core.logger import openhands_logger as logger +from openhands.resolver.interfaces.issue import ( + Issue, + IssueHandlerInterface, + ReviewThread, +) +from openhands.resolver.utils import extract_issue_references + + +class GitlabIssueHandler(IssueHandlerInterface): + def __init__(self, owner: str, repo: str, token: str, username: str | None = None): + self.owner = owner + self.repo = repo + self.token = token + self.username = username + self.base_url = self.get_base_url() + self.download_url = self.get_download_url() + self.clone_url = self.get_clone_url() + self.headers = self.get_headers() + + def set_owner(self, owner: str): + self.owner = owner + + def get_headers(self): + return { + 'Authorization': f'Bearer {self.token}', + 'Accept': 'application/json', + } + + def get_base_url(self): + return f'https://gitlab.com/api/v4/projects/{quote(f'{self.owner}/{self.repo}', safe="")}' + + def get_authorize_url(self): + return f'https://{self.username}:{self.token}@gitlab.com/' + + def get_branch_url(self, branch_name: str): + return self.get_base_url() + f'/repository/branches/{branch_name}' + + def get_download_url(self): + return f'{self.base_url}/issues' + + def get_clone_url(self): + username_and_token = ( + f'{self.username}:{self.token}' if self.username else f'{self.token}' + ) + return f'https://{username_and_token}@gitlab.com/{self.owner}/{self.repo}.git' + + def get_graphql_url(self): + return 'https://gitlab.com/api/graphql' + + def get_compare_url(self, branch_name: str): + return f'https://gitlab.com/{self.owner}/{self.repo}/-/compare/{self.get_default_branch_name()}...{branch_name}' + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + """Download issues from Gitlab. + + Args: + issue_numbers: The numbers of the issues to download + comment_id: The ID of a single comment, if provided, otherwise all comments + + Returns: + List of Gitlab issues. 
+ """ + + if not issue_numbers: + raise ValueError('Unspecified issue number') + + all_issues = self.download_issues() + logger.info(f'Limiting resolving to issues {issue_numbers}.') + all_issues = [ + issue + for issue in all_issues + # if issue['iid'] in issue_numbers and issue['merge_requests_count'] == 0 + if issue['iid'] in issue_numbers # TODO for testing + ] + + if len(issue_numbers) == 1 and not all_issues: + raise ValueError(f'Issue {issue_numbers[0]} not found') + + converted_issues = [] + for issue in all_issues: + if any([issue.get(key) is None for key in ['iid', 'title']]): + logger.warning(f'Skipping issue {issue} as it is missing iid or title.') + continue + + # Handle empty body by using empty string + if issue.get('description') is None: + issue['description'] = '' + + # Get issue thread comments + thread_comments = self.get_issue_comments( + issue['iid'], comment_id=comment_id + ) + # Convert empty lists to None for optional fields + issue_details = Issue( + owner=self.owner, + repo=self.repo, + number=issue['iid'], + title=issue['title'], + body=issue['description'], + thread_comments=thread_comments, + review_comments=None, # Initialize review comments as None for regular issues + ) + + converted_issues.append(issue_details) + + return converted_issues + + def download_issues(self) -> list[Any]: + params: dict[str, int | str] = { + 'state': 'opened', + 'scope': 'all', + 'per_page': 100, + 'page': 1, + } + all_issues = [] + + while True: + response = requests.get( + self.download_url, headers=self.headers, params=params + ) + response.raise_for_status() + issues = response.json() + + if not issues: + break + + if not isinstance(issues, list) or any( + [not isinstance(issue, dict) for issue in issues] + ): + raise ValueError( + 'Expected list of dictionaries from Service Gitlab API.' 
+ ) + + all_issues.extend(issues) + assert isinstance(params['page'], int) + params['page'] += 1 + + return all_issues + + def get_issue_comments( + self, issue_number: int, comment_id: int | None = None + ) -> list[str] | None: + """Download comments for a specific issue from Gitlab.""" + url = f'{self.download_url}/{issue_number}/notes' + params = {'per_page': 100, 'page': 1} + all_comments = [] + + while True: + response = requests.get(url, headers=self.headers, params=params) + response.raise_for_status() + comments = response.json() + + if not comments: + break + + if comment_id: + matching_comment = next( + ( + comment['body'] + for comment in comments + if comment['id'] == comment_id + ), + None, + ) + if matching_comment: + return [matching_comment] + else: + all_comments.extend([comment['body'] for comment in comments]) + + params['page'] += 1 + + return all_comments if all_comments else None + + def branch_exists(self, branch_name: str) -> bool: + print(f'Checking if branch {branch_name} exists...') + response = requests.get( + f'{self.base_url}/repository/branches/{branch_name}', headers=self.headers + ) + exists = response.status_code == 200 + print(f'Branch {branch_name} exists: {exists}') + return exists + + def get_branch_name(self, base_branch_name: str): + branch_name = base_branch_name + attempt = 1 + while self.branch_exists(branch_name): + attempt += 1 + branch_name = f'{base_branch_name}-try{attempt}' + return branch_name + + def reply_to_comment(self, pr_number: int, comment_id: str, reply: str): + response = requests.get( + f'{self.base_url}/merge_requests/{pr_number}/discussions/{comment_id.split('/')[-1]}', + headers=self.headers, + ) + response.raise_for_status() + discussions = response.json() + if len(discussions.get('notes', [])) > 0: + data = { + 'body': f'Openhands fix success summary\n\n\n{reply}', + 'note_id': discussions.get('notes', [])[-1]['id'], + } + response = requests.post( + f'{self.base_url}/merge_requests/{pr_number}/discussions/{comment_id.split('/')[-1]}/notes', + headers=self.headers, + json=data, + ) + response.raise_for_status() + + def get_pull_url(self, pr_number: int): + return ( + f'https://gitlab.com/{self.owner}/{self.repo}/-/merge_requests/{pr_number}' + ) + + def get_default_branch_name(self) -> str: + response = requests.get(f'{self.base_url}', headers=self.headers) + response.raise_for_status() + return response.json()['default_branch'] + + def create_pull_request(self, data=dict) -> dict: + response = requests.post( + f'{self.base_url}/merge_requests', headers=self.headers, json=data + ) + if response.status_code == 403: + raise RuntimeError( + 'Failed to create pull request due to missing permissions. ' + 'Make sure that the provided token has push permissions for the repository.' 
+ ) + response.raise_for_status() + pr_data = response.json() + if 'web_url' in pr_data: + pr_data['html_url'] = pr_data['web_url'] + + if 'iid' in pr_data: + pr_data['number'] = pr_data['iid'] + + return pr_data + + def request_reviewers(self, reviewer: str, pr_number: int): + response = requests.get( + f'https://gitlab.com/api/v4/users?username={reviewer}', + headers=self.headers, + ) + response.raise_for_status() + user_data = response.json() + if len(user_data) > 0: + review_data = {'reviewer_ids': [user_data[0]['id']]} + review_response = requests.put( + f'{self.base_url}/merge_requests/{pr_number}', + headers=self.headers, + json=review_data, + ) + if review_response.status_code != 200: + print( + f'Warning: Failed to request review from {reviewer}: {review_response.text}' + ) + + def send_comment_msg(self, issue_number: int, msg: str): + """Send a comment message to a GitHub issue or pull request. + + Args: + issue_number: The issue or pull request number + msg: The message content to post as a comment + """ + # Post a comment on the PR + comment_url = f'{self.base_url}/issues/{issue_number}/notes' + comment_data = {'body': msg} + comment_response = requests.post( + comment_url, headers=self.headers, json=comment_data + ) + if comment_response.status_code != 201: + print( + f'Failed to post comment: {comment_response.status_code} {comment_response.text}' + ) + else: + print(f'Comment added to the PR: {msg}') + + def get_context_from_external_issues_references( + self, + closing_issues: list[str], + closing_issue_numbers: list[int], + issue_body: str, + review_comments: list[str] | None, + review_threads: list[ReviewThread], + thread_comments: list[str] | None, + ): + pass + + +class GitlabPRHandler(GitlabIssueHandler): + def __init__(self, owner: str, repo: str, token: str, username: str | None = None): + super().__init__(owner, repo, token, username) + self.download_url = f'{self.base_url}/merge_requests' + + def download_pr_metadata( + self, pull_number: int, comment_id: int | None = None + ) -> tuple[list[str], list[int], list[str] | None, list[ReviewThread], list[str]]: + """Run a GraphQL query against the Gitlab API for information. + + Retrieves information about: + 1. unresolved review comments + 2. referenced issues the pull request would close + + Args: + pull_number: The number of the pull request to query. + comment_id: Optional ID of a specific comment to focus on. + query: The GraphQL query as a string. + variables: A dictionary of variables for the query. + token: Your Gitlab personal access token. + + Returns: + The JSON response from the Gitlab API. + """ + # Using graphql as REST API doesn't indicate resolved status for review comments + # TODO: grabbing the first 10 issues, 100 review threads, and 100 coments; add pagination to retrieve all + response = requests.get( + f'{self.base_url}/merge_requests/{pull_number}/related_issues', + headers=self.headers, + ) + response.raise_for_status() + closing_issues = response.json() + closing_issues_bodies = [issue['description'] for issue in closing_issues] + closing_issue_numbers = [ + issue['iid'] for issue in closing_issues + ] # Extract issue numbers + + query = """ + query($projectPath: ID!, $pr: String!) 
{ + project(fullPath: $projectPath) { + mergeRequest(iid: $pr) { + webUrl + discussions(first: 100) { + edges { + node { + id + resolved + resolvable + notes(first: 100) { + nodes { + body + id + position { + filePath + } + } + } + } + } + } + } + } + } + """ + + variables = {'projectPath': f'{self.owner}/{self.repo}', 'pr': f'{pull_number}'} + + response = requests.post( + self.get_graphql_url(), + json={'query': query, 'variables': variables}, + headers=self.headers, + ) + response.raise_for_status() + response_json = response.json() + + # Parse the response to get closing issue references and unresolved review comments + pr_data = ( + response_json.get('data', {}).get('project', {}).get('mergeRequest', {}) + ) + + # Get review comments + review_bodies = None + + # Get unresolved review threads + review_threads = [] + thread_ids = [] # Store thread IDs; agent replies to the thread + raw_review_threads = pr_data.get('discussions', {}).get('edges', []) + + for thread in raw_review_threads: + node = thread.get('node', {}) + if not node.get('resolved', True) and node.get( + 'resolvable', True + ): # Check if the review thread is unresolved + id = node.get('id') + thread_contains_comment_id = False + my_review_threads = node.get('notes', {}).get('nodes', []) + message = '' + files = [] + for i, review_thread in enumerate(my_review_threads): + if ( + comment_id is not None + and int(review_thread['id'].split('/')[-1]) == comment_id + ): + thread_contains_comment_id = True + + if ( + i == len(my_review_threads) - 1 + ): # Check if it's the last thread in the thread + if len(my_review_threads) > 1: + message += '---\n' # Add "---" before the last message if there's more than one thread + message += 'latest feedback:\n' + review_thread['body'] + '\n' + else: + message += ( + review_thread['body'] + '\n' + ) # Add each thread in a new line + + file = review_thread.get('position', {}) + file = file.get('filePath') if file is not None else None + if file and file not in files: + files.append(file) + + if comment_id is None or thread_contains_comment_id: + unresolved_thread = ReviewThread(comment=message, files=files) + review_threads.append(unresolved_thread) + thread_ids.append(id) + + return ( + closing_issues_bodies, + closing_issue_numbers, + review_bodies, + review_threads, + thread_ids, + ) + + # Override processing of downloaded issues + def get_pr_comments( + self, pr_number: int, comment_id: int | None = None + ) -> list[str] | None: + """Download comments for a specific pull request from Gitlab.""" + url = f'{self.base_url}/merge_requests/{pr_number}/notes' + params = {'per_page': 100, 'page': 1} + all_comments = [] + + while True: + response = requests.get(url, headers=self.headers, params=params) + response.raise_for_status() + comments = response.json() + comments = [ + comment + for comment in comments + if comment.get('resolvable', True) and not comment.get('system', True) + ] + + if not comments: + break + + if comment_id is not None: + matching_comment = next( + ( + comment['body'] + for comment in comments + if comment['id'] == comment_id + ), + None, + ) + if matching_comment: + return [matching_comment] + else: + all_comments.extend([comment['body'] for comment in comments]) + + params['page'] += 1 + + return all_comments if all_comments else None + + def get_context_from_external_issues_references( + self, + closing_issues: list[str], + closing_issue_numbers: list[int], + issue_body: str, + review_comments: list[str] | None, + review_threads: list[ReviewThread], + 
thread_comments: list[str] | None, + ): + new_issue_references = [] + + if issue_body: + new_issue_references.extend(extract_issue_references(issue_body)) + + if review_comments: + for comment in review_comments: + new_issue_references.extend(extract_issue_references(comment)) + + if review_threads: + for review_thread in review_threads: + new_issue_references.extend( + extract_issue_references(review_thread.comment) + ) + + if thread_comments: + for thread_comment in thread_comments: + new_issue_references.extend(extract_issue_references(thread_comment)) + + non_duplicate_references = set(new_issue_references) + unique_issue_references = non_duplicate_references.difference( + closing_issue_numbers + ) + + for issue_number in unique_issue_references: + try: + url = f'{self.base_url}/issues/{issue_number}' + response = requests.get(url, headers=self.headers) + response.raise_for_status() + issue_data = response.json() + issue_body = issue_data.get('description', '') + if issue_body: + closing_issues.append(issue_body) + except requests.exceptions.RequestException as e: + logger.warning(f'Failed to fetch issue {issue_number}: {str(e)}') + + return closing_issues + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + if not issue_numbers: + raise ValueError('Unspecified issue numbers') + + all_issues = self.download_issues() + logger.info(f'Limiting resolving to issues {issue_numbers}.') + all_issues = [issue for issue in all_issues if issue['iid'] in issue_numbers] + + converted_issues = [] + for issue in all_issues: + # For PRs, body can be None + if any([issue.get(key) is None for key in ['iid', 'title']]): + logger.warning(f'Skipping #{issue} as it is missing iid or title.') + continue + + # Handle None body for PRs + body = ( + issue.get('description') if issue.get('description') is not None else '' + ) + ( + closing_issues, + closing_issues_numbers, + review_comments, + review_threads, + thread_ids, + ) = self.download_pr_metadata(issue['iid'], comment_id=comment_id) + head_branch = issue['source_branch'] + + # Get PR thread comments + thread_comments = self.get_pr_comments(issue['iid'], comment_id=comment_id) + + closing_issues = self.get_context_from_external_issues_references( + closing_issues, + closing_issues_numbers, + body, + review_comments, + review_threads, + thread_comments, + ) + + issue_details = Issue( + owner=self.owner, + repo=self.repo, + number=issue['iid'], + title=issue['title'], + body=body, + closing_issues=closing_issues, + review_comments=review_comments, + review_threads=review_threads, + thread_ids=thread_ids, + head_branch=head_branch, + thread_comments=thread_comments, + ) + + converted_issues.append(issue_details) + + return converted_issues diff --git a/openhands/resolver/interfaces/issue.py b/openhands/resolver/interfaces/issue.py new file mode 100644 index 000000000000..263fd8160377 --- /dev/null +++ b/openhands/resolver/interfaces/issue.py @@ -0,0 +1,123 @@ +from abc import ABC, abstractmethod +from typing import Any + +from pydantic import BaseModel + + +class ReviewThread(BaseModel): + comment: str + files: list[str] + + +class Issue(BaseModel): + owner: str + repo: str + number: int + title: str + body: str + thread_comments: list[str] | None = None # Added field for issue thread comments + closing_issues: list[str] | None = None + review_comments: list[str] | None = None + review_threads: list[ReviewThread] | None = None + thread_ids: list[str] | None = None + head_branch: str 
| None = None + base_branch: str | None = None + + +class IssueHandlerInterface(ABC): + @abstractmethod + def set_owner(self, owner: str): + pass + + @abstractmethod + def download_issues(self) -> list[Any]: + pass + + @abstractmethod + def get_issue_comments( + self, issue_number: int, comment_id: int | None = None + ) -> list[str] | None: + pass + + @abstractmethod + def get_base_url(self): + pass + + @abstractmethod + def get_branch_url(self, branch_name): + pass + + @abstractmethod + def get_download_url(self): + pass + + @abstractmethod + def get_clone_url(self): + pass + + @abstractmethod + def get_pull_url(self, pr_number: int): + pass + + @abstractmethod + def get_graphql_url(self): + pass + + @abstractmethod + def get_headers(self): + pass + + @abstractmethod + def get_compare_url(self, branch_name): + pass + + @abstractmethod + def get_branch_name(self, base_branch_name: str): + pass + + @abstractmethod + def get_default_branch_name(self): + pass + + @abstractmethod + def branch_exists(self, branch_name: str) -> bool: + pass + + @abstractmethod + def reply_to_comment(self, pr_number: int, comment_id: str, reply: str): + pass + + @abstractmethod + def send_comment_msg(self, issue_number: int, msg: str): + pass + + @abstractmethod + def get_authorize_url(self): + pass + + @abstractmethod + def create_pull_request(self, data=dict) -> dict: + pass + + @abstractmethod + def request_reviewers(self, reviewer: str, pr_number: int): + pass + + @abstractmethod + def get_context_from_external_issues_references( + self, + closing_issues: list[str], + closing_issue_numbers: list[int], + issue_body: str, + review_comments: list[str] | None, + review_threads: list[ReviewThread], + thread_comments: list[str] | None, + ): + pass + + @abstractmethod + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + """Download issues from Gitlab.""" + pass diff --git a/openhands/resolver/interfaces/issue_definitions.py b/openhands/resolver/interfaces/issue_definitions.py new file mode 100644 index 000000000000..6912ab5c1e78 --- /dev/null +++ b/openhands/resolver/interfaces/issue_definitions.py @@ -0,0 +1,400 @@ +import json +import os +import re +from typing import Any, ClassVar + +import jinja2 + +from openhands.core.config import LLMConfig +from openhands.events.event import Event +from openhands.llm.llm import LLM +from openhands.resolver.interfaces.issue import ( + Issue, + IssueHandlerInterface, + ReviewThread, +) +from openhands.resolver.utils import extract_image_urls + + +class ServiceContext: + issue_type: ClassVar[str] + default_git_patch: ClassVar[str] = 'No changes made yet' + + def __init__(self, strategy: IssueHandlerInterface, llm_config: LLMConfig | None): + self._strategy = strategy + if llm_config is not None: + self.llm = LLM(llm_config) + + def set_strategy(self, strategy): + self._strategy = strategy + + +# Strategy context interface +class ServiceContextPR(ServiceContext): + issue_type: ClassVar[str] = 'pr' + + def __init__(self, strategy: IssueHandlerInterface, llm_config: LLMConfig): + super().__init__(strategy, llm_config) + + def get_clone_url(self): + return self._strategy.get_clone_url() + + def download_issues(self) -> list[Any]: + return self._strategy.download_issues() + + def guess_success( + self, + issue: Issue, + history: list[Event], + git_patch: str | None = None, + ) -> tuple[bool, None | list[bool], str]: + """Guess if the issue is fixed based on the history, issue description and git patch. 
+ + Args: + issue: The issue to check + history: The agent's history + git_patch: Optional git patch showing the changes made + """ + last_message = history[-1].message + + issues_context = json.dumps(issue.closing_issues, indent=4) + success_list = [] + explanation_list = [] + + # Handle PRs with file-specific review comments + if issue.review_threads: + for review_thread in issue.review_threads: + if issues_context and last_message: + success, explanation = self._check_review_thread( + review_thread, issues_context, last_message, git_patch + ) + else: + success, explanation = False, 'Missing context or message' + success_list.append(success) + explanation_list.append(explanation) + # Handle PRs with only thread comments (no file-specific review comments) + elif issue.thread_comments: + if issue.thread_comments and issues_context and last_message: + success, explanation = self._check_thread_comments( + issue.thread_comments, issues_context, last_message, git_patch + ) + else: + success, explanation = ( + False, + 'Missing thread comments, context or message', + ) + success_list.append(success) + explanation_list.append(explanation) + elif issue.review_comments: + # Handle PRs with only review comments (no file-specific review comments or thread comments) + if issue.review_comments and issues_context and last_message: + success, explanation = self._check_review_comments( + issue.review_comments, issues_context, last_message, git_patch + ) + else: + success, explanation = ( + False, + 'Missing review comments, context or message', + ) + success_list.append(success) + explanation_list.append(explanation) + else: + # No review comments, thread comments, or file-level review comments found + return False, None, 'No feedback was found to process' + + # Return overall success (all must be true) and explanations + if not success_list: + return False, None, 'No feedback was processed' + return all(success_list), success_list, json.dumps(explanation_list) + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + return self._strategy.get_converted_issues(issue_numbers, comment_id) + + def get_instruction( + self, + issue: Issue, + prompt_template: str, + repo_instruction: str | None = None, + ) -> tuple[str, list[str]]: + """Generate instruction for the agent.""" + template = jinja2.Template(prompt_template) + images = [] + + issues_str = None + if issue.closing_issues: + issues_str = json.dumps(issue.closing_issues, indent=4) + images.extend(extract_image_urls(issues_str)) + + # Handle PRs with review comments + review_comments_str = None + if issue.review_comments: + review_comments_str = json.dumps(issue.review_comments, indent=4) + images.extend(extract_image_urls(review_comments_str)) + + # Handle PRs with file-specific review comments + review_thread_str = None + review_thread_file_str = None + if issue.review_threads: + review_threads = [ + review_thread.comment for review_thread in issue.review_threads + ] + review_thread_files = [] + for review_thread in issue.review_threads: + review_thread_files.extend(review_thread.files) + review_thread_str = json.dumps(review_threads, indent=4) + review_thread_file_str = json.dumps(review_thread_files, indent=4) + images.extend(extract_image_urls(review_thread_str)) + + # Format thread comments if they exist + thread_context = '' + if issue.thread_comments: + thread_context = '\n---\n'.join(issue.thread_comments) + images.extend(extract_image_urls(thread_context)) + + instruction 
= template.render( + issues=issues_str, + review_comments=review_comments_str, + review_threads=review_thread_str, + files=review_thread_file_str, + thread_context=thread_context, + repo_instruction=repo_instruction, + ) + return instruction, images + + def _check_feedback_with_llm(self, prompt: str) -> tuple[bool, str]: + """Helper function to check feedback with LLM and parse response.""" + response = self.llm.completion(messages=[{'role': 'user', 'content': prompt}]) + + answer = response.choices[0].message.content.strip() + pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)' + match = re.search(pattern, answer) + if match: + return match.group(1).lower() == 'true', match.group(2).strip() + return False, f'Failed to decode answer from LLM response: {answer}' + + def _check_review_thread( + self, + review_thread: ReviewThread, + issues_context: str, + last_message: str, + git_patch: str | None = None, + ) -> tuple[bool, str]: + """Check if a review thread's feedback has been addressed.""" + files_context = json.dumps(review_thread.files, indent=4) + + with open( + os.path.join( + os.path.dirname(__file__), + '../prompts/guess_success/pr-feedback-check.jinja', + ), + 'r', + ) as f: + template = jinja2.Template(f.read()) + + prompt = template.render( + issue_context=issues_context, + feedback=review_thread.comment, + files_context=files_context, + last_message=last_message, + git_patch=git_patch or self.default_git_patch, + ) + + return self._check_feedback_with_llm(prompt) + + def _check_thread_comments( + self, + thread_comments: list[str], + issues_context: str, + last_message: str, + git_patch: str | None = None, + ) -> tuple[bool, str]: + """Check if thread comments feedback has been addressed.""" + thread_context = '\n---\n'.join(thread_comments) + + with open( + os.path.join( + os.path.dirname(__file__), + '../prompts/guess_success/pr-thread-check.jinja', + ), + 'r', + ) as f: + template = jinja2.Template(f.read()) + + prompt = template.render( + issue_context=issues_context, + thread_context=thread_context, + last_message=last_message, + git_patch=git_patch or self.default_git_patch, + ) + + return self._check_feedback_with_llm(prompt) + + def _check_review_comments( + self, + review_comments: list[str], + issues_context: str, + last_message: str, + git_patch: str | None = None, + ) -> tuple[bool, str]: + """Check if review comments feedback has been addressed.""" + review_context = '\n---\n'.join(review_comments) + + with open( + os.path.join( + os.path.dirname(__file__), + '../prompts/guess_success/pr-review-check.jinja', + ), + 'r', + ) as f: + template = jinja2.Template(f.read()) + + prompt = template.render( + issue_context=issues_context, + review_context=review_context, + last_message=last_message, + git_patch=git_patch or self.default_git_patch, + ) + + return self._check_feedback_with_llm(prompt) + + +class ServiceContextIssue(ServiceContext): + issue_type: ClassVar[str] = 'issue' + + def __init__(self, strategy: IssueHandlerInterface, llm_config: LLMConfig | None): + super().__init__(strategy, llm_config) + + def get_base_url(self): + return self._strategy.get_base_url() + + def get_branch_url(self, branch_name): + return self._strategy.get_branch_url(branch_name) + + def get_download_url(self): + return self._strategy.get_download_url() + + def get_clone_url(self): + return self._strategy.get_clone_url() + + def get_graphql_url(self): + return self._strategy.get_graphql_url() + + def get_headers(self): + return self._strategy.get_headers() + + def 
get_authorize_url(self): + return self._strategy.get_authorize_url() + + def get_pull_url(self, pr_number: int): + return self._strategy.get_pull_url(pr_number) + + def get_compare_url(self, branch_name: str): + return self._strategy.get_compare_url(branch_name) + + def download_issues(self) -> list[Any]: + return self._strategy.download_issues() + + def get_branch_name( + self, + base_branch_name: str, + ): + return self._strategy.get_branch_name(base_branch_name) + + def branch_exists(self, branch_name: str): + return self._strategy.branch_exists(branch_name) + + def get_default_branch_name(self) -> str: + return self._strategy.get_default_branch_name() + + def create_pull_request(self, data=dict): + return self._strategy.create_pull_request(data) + + def request_reviewers(self, reviewer: str, pr_number: int): + return self._strategy.request_reviewers(reviewer, pr_number) + + def reply_to_comment(self, pr_number, comment_id, reply): + return self._strategy.reply_to_comment(pr_number, comment_id, reply) + + def send_comment_msg(self, issue_number: int, msg: str): + return self._strategy.send_comment_msg(issue_number, msg) + + def get_issue_comments( + self, issue_number: int, comment_id: int | None = None + ) -> list[str] | None: + return self._strategy.get_issue_comments(issue_number, comment_id) + + def get_instruction( + self, + issue: Issue, + prompt_template: str, + repo_instruction: str | None = None, + ) -> tuple[str, list[str]]: + """Generate instruction for the agent.""" + # Format thread comments if they exist + thread_context = '' + if issue.thread_comments: + thread_context = '\n\nIssue Thread Comments:\n' + '\n---\n'.join( + issue.thread_comments + ) + + images = [] + images.extend(extract_image_urls(issue.body)) + images.extend(extract_image_urls(thread_context)) + + template = jinja2.Template(prompt_template) + return ( + template.render( + body=issue.title + '\n\n' + issue.body + thread_context, + repo_instruction=repo_instruction, + ), + images, + ) + + def guess_success( + self, issue: Issue, history: list[Event], git_patch: str | None = None + ) -> tuple[bool, None | list[bool], str]: + """Guess if the issue is fixed based on the history and the issue description. 
+ + Args: + issue: The issue to check + history: The agent's history + git_patch: Optional git patch showing the changes made + """ + last_message = history[-1].message + # Include thread comments in the prompt if they exist + issue_context = issue.body + if issue.thread_comments: + issue_context += '\n\nIssue Thread Comments:\n' + '\n---\n'.join( + issue.thread_comments + ) + + with open( + os.path.join( + os.path.dirname(__file__), + '../prompts/guess_success/issue-success-check.jinja', + ), + 'r', + ) as f: + template = jinja2.Template(f.read()) + prompt = template.render( + issue_context=issue_context, + last_message=last_message, + git_patch=git_patch or self.default_git_patch, + ) + + response = self.llm.completion(messages=[{'role': 'user', 'content': prompt}]) + + answer = response.choices[0].message.content.strip() + pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)' + match = re.search(pattern, answer) + if match: + return match.group(1).lower() == 'true', None, match.group(2) + + return False, None, f'Failed to decode answer from LLM response: {answer}' + + def get_converted_issues( + self, issue_numbers: list[int] | None = None, comment_id: int | None = None + ) -> list[Issue]: + return self._strategy.get_converted_issues(issue_numbers, comment_id) diff --git a/openhands/resolver/issue_definitions.py b/openhands/resolver/issue_definitions.py deleted file mode 100644 index b9d7e83a3071..000000000000 --- a/openhands/resolver/issue_definitions.py +++ /dev/null @@ -1,806 +0,0 @@ -import json -import os -import re -from abc import ABC, abstractmethod -from typing import Any, ClassVar - -import jinja2 -import requests - -from openhands.core.config import LLMConfig -from openhands.core.logger import openhands_logger as logger -from openhands.events.event import Event -from openhands.llm.llm import LLM -from openhands.resolver.github_issue import GithubIssue, ReviewThread - - -class IssueHandlerInterface(ABC): - issue_type: ClassVar[str] - llm: LLM - - @abstractmethod - def get_converted_issues( - self, issue_numbers: list[int] | None = None, comment_id: int | None = None - ) -> list[GithubIssue]: - """Download issues from GitHub.""" - pass - - @abstractmethod - def get_instruction( - self, - issue: GithubIssue, - prompt_template: str, - repo_instruction: str | None = None, - ) -> tuple[str, list[str]]: - """Generate instruction and image urls for the agent.""" - pass - - @abstractmethod - def guess_success( - self, issue: GithubIssue, history: list[Event], git_patch: str | None = None - ) -> tuple[bool, list[bool] | None, str]: - """Guess if the issue has been resolved based on the agent's output and git patch.""" - pass - - -class IssueHandler(IssueHandlerInterface): - issue_type: ClassVar[str] = 'issue' - default_git_patch: ClassVar[str] = 'No changes made yet' - - def __init__(self, owner: str, repo: str, token: str, llm_config: LLMConfig): - self.download_url = 'https://api.github.com/repos/{}/{}/issues' - self.owner = owner - self.repo = repo - self.token = token - self.llm = LLM(llm_config) - - def _download_issues_from_github(self) -> list[Any]: - url = self.download_url.format(self.owner, self.repo) - headers = { - 'Authorization': f'token {self.token}', - 'Accept': 'application/vnd.github.v3+json', - } - params: dict[str, int | str] = {'state': 'open', 'per_page': 100, 'page': 1} - all_issues = [] - - # Get issues, page by page - while True: - response = requests.get(url, headers=headers, params=params) - response.raise_for_status() - issues = 
response.json() - - # No more issues, break the loop - if not issues: - break - - # Sanity check - the response is a list of dictionaries - if not isinstance(issues, list) or any( - [not isinstance(issue, dict) for issue in issues] - ): - raise ValueError('Expected list of dictionaries from Github API.') - - # Add the issues to the final list - all_issues.extend(issues) - assert isinstance(params['page'], int) - params['page'] += 1 - - return all_issues - - def _extract_image_urls(self, issue_body: str) -> list[str]: - # Regular expression to match Markdown image syntax ![alt text](image_url) - image_pattern = r'!\[.*?\]\((https?://[^\s)]+)\)' - return re.findall(image_pattern, issue_body) - - def _extract_issue_references(self, body: str) -> list[int]: - # First, remove code blocks as they may contain false positives - body = re.sub(r'```.*?```', '', body, flags=re.DOTALL) - - # Remove inline code - body = re.sub(r'`[^`]*`', '', body) - - # Remove URLs that contain hash symbols - body = re.sub(r'https?://[^\s)]*#\d+[^\s)]*', '', body) - - # Now extract issue numbers, making sure they're not part of other text - # The pattern matches #number that: - # 1. Is at the start of text or after whitespace/punctuation - # 2. Is followed by whitespace, punctuation, or end of text - # 3. Is not part of a URL - pattern = r'(?:^|[\s\[({]|[^\w#])#(\d+)(?=[\s,.\])}]|$)' - return [int(match) for match in re.findall(pattern, body)] - - def _get_issue_comments( - self, issue_number: int, comment_id: int | None = None - ) -> list[str] | None: - """Retrieve comments for a specific issue from Github. - - Args: - issue_number: The ID of the issue to get comments for - comment_id: The ID of a single comment, if provided, otherwise all comments - """ - url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{issue_number}/comments' - headers = { - 'Authorization': f'token {self.token}', - 'Accept': 'application/vnd.github.v3+json', - } - params = {'per_page': 100, 'page': 1} - all_comments = [] - - # Get comments, page by page - while True: - response = requests.get(url, headers=headers, params=params) - response.raise_for_status() - comments = response.json() - - if not comments: - break - - # If a single comment ID is provided, return only that comment - if comment_id: - matching_comment = next( - ( - comment['body'] - for comment in comments - if comment['id'] == comment_id - ), - None, - ) - if matching_comment: - return [matching_comment] - else: - # Otherwise, return all comments - all_comments.extend([comment['body'] for comment in comments]) - - params['page'] += 1 - - return all_comments if all_comments else None - - def get_converted_issues( - self, issue_numbers: list[int] | None = None, comment_id: int | None = None - ) -> list[GithubIssue]: - """Download issues from Github. - - Args: - issue_numbers: The numbers of the issues to download - comment_id: The ID of a single comment, if provided, otherwise all comments - - Returns: - List of Github issues. 
- """ - - if not issue_numbers: - raise ValueError('Unspecified issue number') - - all_issues = self._download_issues_from_github() - logger.info(f'Limiting resolving to issues {issue_numbers}.') - all_issues = [ - issue - for issue in all_issues - if issue['number'] in issue_numbers and 'pull_request' not in issue - ] - - if len(issue_numbers) == 1 and not all_issues: - raise ValueError(f'Issue {issue_numbers[0]} not found') - - converted_issues = [] - for issue in all_issues: - # Check for required fields (number and title) - if any([issue.get(key) is None for key in ['number', 'title']]): - logger.warning( - f'Skipping issue {issue} as it is missing number or title.' - ) - continue - - # Handle empty body by using empty string - if issue.get('body') is None: - issue['body'] = '' - - # Get issue thread comments - thread_comments = self._get_issue_comments( - issue['number'], comment_id=comment_id - ) - # Convert empty lists to None for optional fields - issue_details = GithubIssue( - owner=self.owner, - repo=self.repo, - number=issue['number'], - title=issue['title'], - body=issue['body'], - thread_comments=thread_comments, - review_comments=None, # Initialize review comments as None for regular issues - ) - - converted_issues.append(issue_details) - - return converted_issues - - def get_instruction( - self, - issue: GithubIssue, - prompt_template: str, - repo_instruction: str | None = None, - ) -> tuple[str, list[str]]: - """Generate instruction for the agent. - - Args: - issue: The issue to generate instruction for - prompt_template: The prompt template to use - repo_instruction: The repository instruction if it exists - """ - - # Format thread comments if they exist - thread_context = '' - if issue.thread_comments: - thread_context = '\n\nIssue Thread Comments:\n' + '\n---\n'.join( - issue.thread_comments - ) - - # Extract image URLs from the issue body and thread comments - images = [] - images.extend(self._extract_image_urls(issue.body)) - images.extend(self._extract_image_urls(thread_context)) - - template = jinja2.Template(prompt_template) - return ( - template.render( - body=issue.title + '\n\n' + issue.body + thread_context, - repo_instruction=repo_instruction, - ), - images, - ) - - def guess_success( - self, issue: GithubIssue, history: list[Event], git_patch: str | None = None - ) -> tuple[bool, None | list[bool], str]: - """Guess if the issue is fixed based on the history and the issue description. 
- - Args: - issue: The issue to check - history: The agent's history - git_patch: Optional git patch showing the changes made - """ - last_message = history[-1].message - - # Include thread comments in the prompt if they exist - issue_context = issue.body - if issue.thread_comments: - issue_context += '\n\nIssue Thread Comments:\n' + '\n---\n'.join( - issue.thread_comments - ) - - # Prepare the prompt - with open( - os.path.join( - os.path.dirname(__file__), - 'prompts/guess_success/issue-success-check.jinja', - ), - 'r', - ) as f: - template = jinja2.Template(f.read()) - prompt = template.render( - issue_context=issue_context, - last_message=last_message, - git_patch=git_patch or self.default_git_patch, - ) - - # Get the LLM response and check for 'success' and 'explanation' in the answer - response = self.llm.completion(messages=[{'role': 'user', 'content': prompt}]) - - answer = response.choices[0].message.content.strip() - pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)' - match = re.search(pattern, answer) - if match: - return match.group(1).lower() == 'true', None, match.group(2) - - return False, None, f'Failed to decode answer from LLM response: {answer}' - - -class PRHandler(IssueHandler): - issue_type: ClassVar[str] = 'pr' - - def __init__(self, owner: str, repo: str, token: str, llm_config: LLMConfig): - super().__init__(owner, repo, token, llm_config) - self.download_url = 'https://api.github.com/repos/{}/{}/pulls' - - def __download_pr_metadata( - self, pull_number: int, comment_id: int | None = None - ) -> tuple[list[str], list[int], list[str], list[ReviewThread], list[str]]: - """Run a GraphQL query against the GitHub API for information. - - Retrieves information about: - 1. unresolved review comments - 2. referenced issues the pull request would close - - Args: - pull_number: The number of the pull request to query. - comment_id: Optional ID of a specific comment to focus on. - query: The GraphQL query as a string. - variables: A dictionary of variables for the query. - token: Your GitHub personal access token. - - Returns: - The JSON response from the GitHub API. - """ - # Using graphql as REST API doesn't indicate resolved status for review comments - # TODO: grabbing the first 10 issues, 100 review threads, and 100 coments; add pagination to retrieve all - query = """ - query($owner: String!, $repo: String!, $pr: Int!) 
{ - repository(owner: $owner, name: $repo) { - pullRequest(number: $pr) { - closingIssuesReferences(first: 10) { - edges { - node { - body - number - } - } - } - url - reviews(first: 100) { - nodes { - body - state - fullDatabaseId - } - } - reviewThreads(first: 100) { - edges{ - node{ - id - isResolved - comments(first: 100) { - totalCount - nodes { - body - path - fullDatabaseId - } - } - } - } - } - } - } - } - """ - - variables = {'owner': self.owner, 'repo': self.repo, 'pr': pull_number} - - # Run the query - url = 'https://api.github.com/graphql' - headers = { - 'Authorization': f'Bearer {self.token}', - 'Content-Type': 'application/json', - } - - response = requests.post( - url, json={'query': query, 'variables': variables}, headers=headers - ) - response.raise_for_status() - response_json = response.json() - - # Parse the response to get closing issue references and unresolved review comments - pr_data = ( - response_json.get('data', {}).get('repository', {}).get('pullRequest', {}) - ) - - # Get closing issues - closing_issues = pr_data.get('closingIssuesReferences', {}).get('edges', []) - closing_issues_bodies = [issue['node']['body'] for issue in closing_issues] - closing_issue_numbers = [ - issue['node']['number'] for issue in closing_issues - ] # Extract issue numbers - - # Get review comments - reviews = pr_data.get('reviews', {}).get('nodes', []) - if comment_id is not None: - reviews = [ - review - for review in reviews - if int(review['fullDatabaseId']) == comment_id - ] - review_bodies = [review['body'] for review in reviews] - - # Get unresolved review threads - review_threads = [] - thread_ids = [] # Store thread IDs; agent replies to the thread - raw_review_threads = pr_data.get('reviewThreads', {}).get('edges', []) - for thread in raw_review_threads: - node = thread.get('node', {}) - if not node.get( - 'isResolved', True - ): # Check if the review thread is unresolved - id = node.get('id') - thread_contains_comment_id = False - my_review_threads = node.get('comments', {}).get('nodes', []) - message = '' - files = [] - for i, review_thread in enumerate(my_review_threads): - if ( - comment_id is not None - and int(review_thread['fullDatabaseId']) == comment_id - ): - thread_contains_comment_id = True - - if ( - i == len(my_review_threads) - 1 - ): # Check if it's the last thread in the thread - if len(my_review_threads) > 1: - message += '---\n' # Add "---" before the last message if there's more than one thread - message += 'latest feedback:\n' + review_thread['body'] + '\n' - else: - message += ( - review_thread['body'] + '\n' - ) # Add each thread in a new line - - # Source files on which the comments were made - file = review_thread.get('path') - if file and file not in files: - files.append(file) - - # If the comment ID is not provided or the thread contains the comment ID, add the thread to the list - if comment_id is None or thread_contains_comment_id: - unresolved_thread = ReviewThread(comment=message, files=files) - review_threads.append(unresolved_thread) - thread_ids.append(id) - - return ( - closing_issues_bodies, - closing_issue_numbers, - review_bodies, - review_threads, - thread_ids, - ) - - # Override processing of downloaded issues - def _get_pr_comments( - self, pr_number: int, comment_id: int | None = None - ) -> list[str] | None: - """Download comments for a specific pull request from Github.""" - url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{pr_number}/comments' - headers = { - 'Authorization': f'token {self.token}', - 
'Accept': 'application/vnd.github.v3+json', - } - params = {'per_page': 100, 'page': 1} - all_comments = [] - - while True: - response = requests.get(url, headers=headers, params=params) - response.raise_for_status() - comments = response.json() - - if not comments: - break - - if comment_id is not None: - matching_comment = next( - ( - comment['body'] - for comment in comments - if comment['id'] == comment_id - ), - None, - ) - if matching_comment: - return [matching_comment] - else: - all_comments.extend([comment['body'] for comment in comments]) - - params['page'] += 1 - - return all_comments if all_comments else None - - def __get_context_from_external_issues_references( - self, - closing_issues: list[str], - closing_issue_numbers: list[int], - issue_body: str, - review_comments: list[str], - review_threads: list[ReviewThread], - thread_comments: list[str] | None, - ): - new_issue_references = [] - - if issue_body: - new_issue_references.extend(self._extract_issue_references(issue_body)) - - if review_comments: - for comment in review_comments: - new_issue_references.extend(self._extract_issue_references(comment)) - - if review_threads: - for review_thread in review_threads: - new_issue_references.extend( - self._extract_issue_references(review_thread.comment) - ) - - if thread_comments: - for thread_comment in thread_comments: - new_issue_references.extend( - self._extract_issue_references(thread_comment) - ) - - non_duplicate_references = set(new_issue_references) - unique_issue_references = non_duplicate_references.difference( - closing_issue_numbers - ) - - for issue_number in unique_issue_references: - try: - url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{issue_number}' - headers = { - 'Authorization': f'Bearer {self.token}', - 'Accept': 'application/vnd.github.v3+json', - } - response = requests.get(url, headers=headers) - response.raise_for_status() - issue_data = response.json() - issue_body = issue_data.get('body', '') - if issue_body: - closing_issues.append(issue_body) - except requests.exceptions.RequestException as e: - logger.warning(f'Failed to fetch issue {issue_number}: {str(e)}') - - return closing_issues - - def get_converted_issues( - self, issue_numbers: list[int] | None = None, comment_id: int | None = None - ) -> list[GithubIssue]: - if not issue_numbers: - raise ValueError('Unspecified issue numbers') - - all_issues = self._download_issues_from_github() - logger.info(f'Limiting resolving to issues {issue_numbers}.') - all_issues = [issue for issue in all_issues if issue['number'] in issue_numbers] - - converted_issues = [] - for issue in all_issues: - # For PRs, body can be None - if any([issue.get(key) is None for key in ['number', 'title']]): - logger.warning(f'Skipping #{issue} as it is missing number or title.') - continue - - # Handle None body for PRs - body = issue.get('body') if issue.get('body') is not None else '' - ( - closing_issues, - closing_issues_numbers, - review_comments, - review_threads, - thread_ids, - ) = self.__download_pr_metadata(issue['number'], comment_id=comment_id) - head_branch = issue['head']['ref'] - - # Get PR thread comments - thread_comments = self._get_pr_comments( - issue['number'], comment_id=comment_id - ) - - closing_issues = self.__get_context_from_external_issues_references( - closing_issues, - closing_issues_numbers, - body, - review_comments, - review_threads, - thread_comments, - ) - - issue_details = GithubIssue( - owner=self.owner, - repo=self.repo, - number=issue['number'], - 
title=issue['title'], - body=body, - closing_issues=closing_issues, - review_comments=review_comments, - review_threads=review_threads, - thread_ids=thread_ids, - head_branch=head_branch, - thread_comments=thread_comments, - ) - - converted_issues.append(issue_details) - - return converted_issues - - def get_instruction( - self, - issue: GithubIssue, - prompt_template: str, - repo_instruction: str | None = None, - ) -> tuple[str, list[str]]: - """Generate instruction for the agent.""" - template = jinja2.Template(prompt_template) - images = [] - - issues_str = None - if issue.closing_issues: - issues_str = json.dumps(issue.closing_issues, indent=4) - images.extend(self._extract_image_urls(issues_str)) - - # Handle PRs with review comments - review_comments_str = None - if issue.review_comments: - review_comments_str = json.dumps(issue.review_comments, indent=4) - images.extend(self._extract_image_urls(review_comments_str)) - - # Handle PRs with file-specific review comments - review_thread_str = None - review_thread_file_str = None - if issue.review_threads: - review_threads = [ - review_thread.comment for review_thread in issue.review_threads - ] - review_thread_files = [] - for review_thread in issue.review_threads: - review_thread_files.extend(review_thread.files) - review_thread_str = json.dumps(review_threads, indent=4) - review_thread_file_str = json.dumps(review_thread_files, indent=4) - images.extend(self._extract_image_urls(review_thread_str)) - - # Format thread comments if they exist - thread_context = '' - if issue.thread_comments: - thread_context = '\n---\n'.join(issue.thread_comments) - images.extend(self._extract_image_urls(thread_context)) - - instruction = template.render( - issues=issues_str, - review_comments=review_comments_str, - review_threads=review_thread_str, - files=review_thread_file_str, - thread_context=thread_context, - repo_instruction=repo_instruction, - ) - return instruction, images - - def _check_feedback_with_llm(self, prompt: str) -> tuple[bool, str]: - """Helper function to check feedback with LLM and parse response.""" - response = self.llm.completion(messages=[{'role': 'user', 'content': prompt}]) - - answer = response.choices[0].message.content.strip() - pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)' - match = re.search(pattern, answer) - if match: - return match.group(1).lower() == 'true', match.group(2).strip() - return False, f'Failed to decode answer from LLM response: {answer}' - - def _check_review_thread( - self, - review_thread: ReviewThread, - issues_context: str, - last_message: str, - git_patch: str | None = None, - ) -> tuple[bool, str]: - """Check if a review thread's feedback has been addressed.""" - files_context = json.dumps(review_thread.files, indent=4) - - with open( - os.path.join( - os.path.dirname(__file__), - 'prompts/guess_success/pr-feedback-check.jinja', - ), - 'r', - ) as f: - template = jinja2.Template(f.read()) - - prompt = template.render( - issue_context=issues_context, - feedback=review_thread.comment, - files_context=files_context, - last_message=last_message, - git_patch=git_patch or self.default_git_patch, - ) - - return self._check_feedback_with_llm(prompt) - - def _check_thread_comments( - self, - thread_comments: list[str], - issues_context: str, - last_message: str, - git_patch: str | None = None, - ) -> tuple[bool, str]: - """Check if thread comments feedback has been addressed.""" - thread_context = '\n---\n'.join(thread_comments) - - with open( - os.path.join( - 
os.path.dirname(__file__), 'prompts/guess_success/pr-thread-check.jinja' - ), - 'r', - ) as f: - template = jinja2.Template(f.read()) - - prompt = template.render( - issue_context=issues_context, - thread_context=thread_context, - last_message=last_message, - git_patch=git_patch or self.default_git_patch, - ) - - return self._check_feedback_with_llm(prompt) - - def _check_review_comments( - self, - review_comments: list[str], - issues_context: str, - last_message: str, - git_patch: str | None = None, - ) -> tuple[bool, str]: - """Check if review comments feedback has been addressed.""" - review_context = '\n---\n'.join(review_comments) - - with open( - os.path.join( - os.path.dirname(__file__), 'prompts/guess_success/pr-review-check.jinja' - ), - 'r', - ) as f: - template = jinja2.Template(f.read()) - - prompt = template.render( - issue_context=issues_context, - review_context=review_context, - last_message=last_message, - git_patch=git_patch or self.default_git_patch, - ) - - return self._check_feedback_with_llm(prompt) - - def guess_success( - self, issue: GithubIssue, history: list[Event], git_patch: str | None = None - ) -> tuple[bool, None | list[bool], str]: - """Guess if the issue is fixed based on the history, issue description and git patch.""" - last_message = history[-1].message - - issues_context = json.dumps(issue.closing_issues, indent=4) - success_list = [] - explanation_list = [] - - # Handle PRs with file-specific review comments - if issue.review_threads: - for review_thread in issue.review_threads: - if issues_context and last_message: - success, explanation = self._check_review_thread( - review_thread, issues_context, last_message, git_patch - ) - else: - success, explanation = False, 'Missing context or message' - success_list.append(success) - explanation_list.append(explanation) - # Handle PRs with only thread comments (no file-specific review comments) - elif issue.thread_comments: - if issue.thread_comments and issues_context and last_message: - success, explanation = self._check_thread_comments( - issue.thread_comments, issues_context, last_message, git_patch - ) - else: - success, explanation = ( - False, - 'Missing thread comments, context or message', - ) - success_list.append(success) - explanation_list.append(explanation) - elif issue.review_comments: - # Handle PRs with only review comments (no file-specific review comments or thread comments) - if issue.review_comments and issues_context and last_message: - success, explanation = self._check_review_comments( - issue.review_comments, issues_context, last_message, git_patch - ) - else: - success, explanation = ( - False, - 'Missing review comments, context or message', - ) - success_list.append(success) - explanation_list.append(explanation) - else: - # No review comments, thread comments, or file-level review comments found - return False, None, 'No feedback was found to process' - - # Return overall success (all must be true) and explanations - if not success_list: - return False, None, 'No feedback was processed' - return all(success_list), success_list, json.dumps(explanation_list) diff --git a/openhands/resolver/resolve_all_issues.py b/openhands/resolver/resolve_all_issues.py index 6192fc02f8e7..6aa32396545d 100644 --- a/openhands/resolver/resolve_all_issues.py +++ b/openhands/resolver/resolve_all_issues.py @@ -13,12 +13,16 @@ import openhands from openhands.core.config import LLMConfig from openhands.core.logger import openhands_logger as logger -from openhands.resolver.github_issue import GithubIssue 
+from openhands.resolver.interfaces.issue import Issue from openhands.resolver.resolve_issue import ( issue_handler_factory, process_issue, ) from openhands.resolver.resolver_output import ResolverOutput +from openhands.resolver.utils import ( + Platform, + identify_token, +) def cleanup(): @@ -51,6 +55,7 @@ async def resolve_issues( repo: str, token: str, username: str, + platform: Platform, max_iterations: int, limit_issues: int | None, num_workers: int, @@ -62,13 +67,13 @@ async def resolve_issues( repo_instruction: str | None, issue_numbers: list[int] | None, ) -> None: - """Resolve multiple github issues. + """Resolve multiple github or gitlab issues. Args: - owner: Github owner of the repo. - repo: Github repository to resolve issues in form of `owner/repo`. - token: Github token to access the repository. - username: Github username to access the repository. + owner: Github or Gitlab owner of the repo. + repo: Github or Gitlab repository to resolve issues in form of `owner/repo`. + token: Github or Gitlab token to access the repository. + username: Github or Gitlab username to access the repository. max_iterations: Maximum number of iterations to run. limit_issues: Limit the number of issues to resolve. num_workers: Number of workers to use for parallel processing. @@ -80,10 +85,12 @@ async def resolve_issues( repo_instruction: Repository instruction to use. issue_numbers: List of issue numbers to resolve. """ - issue_handler = issue_handler_factory(issue_type, owner, repo, token, llm_config) + issue_handler = issue_handler_factory( + issue_type, owner, repo, token, llm_config, platform + ) # Load dataset - issues: list[GithubIssue] = issue_handler.get_converted_issues( + issues: list[Issue] = issue_handler.get_converted_issues( issue_numbers=issue_numbers ) @@ -107,7 +114,7 @@ async def resolve_issues( [ 'git', 'clone', - f'https://{username}:{token}@github.com/{owner}/{repo}', + issue_handler.get_clone_url(), f'{output_dir}/repo', ] ).decode('utf-8') @@ -188,6 +195,7 @@ async def resolve_issues( task = update_progress( process_issue( issue, + platform, base_commit, max_iterations, llm_config, @@ -221,24 +229,26 @@ async def run_with_semaphore(task): def main(): - parser = argparse.ArgumentParser(description='Resolve multiple issues from Github.') + parser = argparse.ArgumentParser( + description='Resolve multiple issues from Github or Gitlab.' 
+ ) parser.add_argument( '--repo', type=str, required=True, - help='Github repository to resolve issues in form of `owner/repo`.', + help='Github or Gitlab repository to resolve issues in form of `owner/repo`.', ) parser.add_argument( '--token', type=str, default=None, - help='Github token to access the repository.', + help='Github or Gitlab token to access the repository.', ) parser.add_argument( '--username', type=str, default=None, - help='Github username to access the repository.', + help='Github or Gitlab username to access the repository.', ) parser.add_argument( '--runtime-container-image', @@ -323,15 +333,20 @@ def main(): ) owner, repo = my_args.repo.split('/') - token = my_args.token if my_args.token else os.getenv('GITHUB_TOKEN') - username = my_args.username if my_args.username else os.getenv('GITHUB_USERNAME') + token = my_args.token or os.getenv('GITHUB_TOKEN') or os.getenv('GITLAB_TOKEN') + username = my_args.username if my_args.username else os.getenv('GIT_USERNAME') if not username: - raise ValueError('Github username is required.') + raise ValueError('Username is required.') if not token: - raise ValueError('Github token is required.') + raise ValueError('Token is required.') + + platform = identify_token(token) + if platform == Platform.INVALID: + raise ValueError('Token is invalid.') api_key = my_args.llm_api_key or os.environ['LLM_API_KEY'] + llm_config = LLMConfig( model=my_args.llm_model or os.environ['LLM_MODEL'], api_key=str(api_key) if api_key else None, @@ -369,6 +384,7 @@ def main(): repo=repo, token=token, username=username, + platform=platform, runtime_container_image=runtime_container_image, max_iterations=my_args.max_iterations, limit_issues=my_args.limit_issues, diff --git a/openhands/resolver/resolve_issue.py b/openhands/resolver/resolve_issue.py index 45c9e33af7c0..80cddb9ed581 100644 --- a/openhands/resolver/resolve_issue.py +++ b/openhands/resolver/resolve_issue.py @@ -24,15 +24,19 @@ Observation, ) from openhands.events.stream import EventStreamSubscriber -from openhands.resolver.github_issue import GithubIssue -from openhands.resolver.issue_definitions import ( - IssueHandler, - IssueHandlerInterface, - PRHandler, +from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, ) from openhands.resolver.resolver_output import ResolverOutput from openhands.resolver.utils import ( + Platform, codeact_user_response, + get_unique_uid, + identify_token, reset_logger_for_multiprocessing, ) from openhands.runtime.base import Runtime @@ -43,6 +47,7 @@ def initialize_runtime( runtime: Runtime, + platform: Platform, ): """Initialize the runtime for the agent. 
@@ -61,6 +66,12 @@ def initialize_runtime( if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0: raise RuntimeError(f'Failed to change directory to /workspace.\n{obs}') + if platform == Platform.GITLAB and os.getenv('GITLAB_CI') == 'true': + action = CmdRunAction(command='sudo chown -R 1001:0 /workspace/*') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + action = CmdRunAction(command='git config --global core.pager ""') logger.info(action, extra={'msg_type': 'ACTION'}) obs = runtime.run_action(action) @@ -72,6 +83,7 @@ def initialize_runtime( async def complete_runtime( runtime: Runtime, base_commit: str, + platform: Platform, ) -> dict[str, Any]: """Complete the runtime for the agent. @@ -107,7 +119,11 @@ async def complete_runtime( if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0: raise RuntimeError(f'Failed to set git config. Observation: {obs}') - action = CmdRunAction(command='git add -A') + if platform == Platform.GITLAB and os.getenv('GITLAB_CI') == 'true': + action = CmdRunAction(command='sudo git add -A') + else: + action = CmdRunAction(command='git add -A') + logger.info(action, extra={'msg_type': 'ACTION'}) obs = runtime.run_action(action) logger.info(obs, extra={'msg_type': 'OBSERVATION'}) @@ -143,14 +159,15 @@ async def complete_runtime( async def process_issue( - issue: GithubIssue, + issue: Issue, + platform: Platform, base_commit: str, max_iterations: int, llm_config: LLMConfig, output_dir: str, runtime_container_image: str | None, prompt_template: str, - issue_handler: IssueHandlerInterface, + issue_handler: ServiceContextIssue | ServiceContextPR, repo_instruction: str | None = None, reset_logger: bool = False, ) -> ResolverOutput: @@ -172,6 +189,16 @@ async def process_issue( shutil.rmtree(workspace_base) shutil.copytree(os.path.join(output_dir, 'repo'), workspace_base) + # This code looks unnecessary because these are default values in the config class + # they're set by default if nothing else overrides them + # FIXME we should remove them here + kwargs = {} + if os.getenv('GITLAB_CI') == 'True': + kwargs['local_runtime_url'] = os.getenv('LOCAL_RUNTIME_URL', 'http://localhost') + user_id = os.getuid() if hasattr(os, 'getuid') else 1000 + if user_id == 0: + kwargs['user_id'] = get_unique_uid() + config = AppConfig( default_agent='CodeActAgent', runtime='docker', @@ -183,6 +210,7 @@ async def process_issue( use_host_network=False, # large enough timeout, since some testcases take very long to run timeout=300, + **kwargs, ), # do not mount workspace workspace_base=workspace_base, @@ -199,7 +227,7 @@ def on_event(evt): runtime.event_stream.subscribe(EventStreamSubscriber.MAIN, on_event, str(uuid4())) - initialize_runtime(runtime) + initialize_runtime(runtime, platform) instruction, images_urls = issue_handler.get_instruction( issue, prompt_template, repo_instruction @@ -222,7 +250,7 @@ def on_event(evt): last_error: str | None = error_msg # Get git patch - return_val = await complete_runtime(runtime, base_commit) + return_val = await complete_runtime(runtime, base_commit, platform) git_patch = return_val['git_patch'] logger.info( f'Got git diff for instance {issue.number}:\n--------\n{git_patch}\n--------' @@ -283,12 +311,32 @@ def on_event(evt): def issue_handler_factory( - issue_type: str, owner: str, repo: str, token: str, llm_config: LLMConfig -) -> IssueHandlerInterface: + issue_type: str, + owner: str, + repo: str, + token: str, + llm_config: 
LLMConfig,
+    platform: Platform,
+    username: str | None = None,
+) -> ServiceContextIssue | ServiceContextPR:
     if issue_type == 'issue':
-        return IssueHandler(owner, repo, token, llm_config)
+        if platform == Platform.GITHUB:
+            return ServiceContextIssue(
+                GithubIssueHandler(owner, repo, token, username), llm_config
+            )
+        else:  # platform == Platform.GITLAB
+            return ServiceContextIssue(
+                GitlabIssueHandler(owner, repo, token, username), llm_config
+            )
     elif issue_type == 'pr':
-        return PRHandler(owner, repo, token, llm_config)
+        if platform == Platform.GITHUB:
+            return ServiceContextPR(
+                GithubPRHandler(owner, repo, token, username), llm_config
+            )
+        else:  # platform == Platform.GITLAB
+            return ServiceContextPR(
+                GitlabPRHandler(owner, repo, token, username), llm_config
+            )
     else:
         raise ValueError(f'Invalid issue type: {issue_type}')
 
 
@@ -298,6 +346,7 @@ async def resolve_issue(
     repo: str,
     token: str,
     username: str,
+    platform: Platform,
     max_iterations: int,
     output_dir: str,
     llm_config: LLMConfig,
@@ -309,13 +358,14 @@ async def resolve_issue(
     comment_id: int | None,
     reset_logger: bool = False,
 ) -> None:
-    """Resolve a single github issue.
+    """Resolve a single issue.
 
     Args:
-        owner: Github owner of the repo.
-        repo: Github repository to resolve issues in form of `owner/repo`.
-        token: Github token to access the repository.
-        username: Github username to access the repository.
+        owner: Owner of the repo.
+        repo: Repository to resolve issues in the form `owner/repo`.
+        token: Token to access the repository.
+        username: Username to access the repository.
+        platform: Platform of the repository.
         max_iterations: Maximum number of iterations to run.
         output_dir: Output directory to write the results.
         llm_config: Configuration for the language model.
@@ -328,10 +378,12 @@ async def resolve_issue(
         reset_logger: Whether to reset the logger for multiprocessing. 
""" - issue_handler = issue_handler_factory(issue_type, owner, repo, token, llm_config) + issue_handler = issue_handler_factory( + issue_type, owner, repo, token, llm_config, platform, username + ) # Load dataset - issues: list[GithubIssue] = issue_handler.get_converted_issues( + issues: list[Issue] = issue_handler.get_converted_issues( issue_numbers=[issue_number], comment_id=comment_id ) @@ -377,7 +429,7 @@ async def resolve_issue( [ 'git', 'clone', - f'https://{username}:{token}@github.com/{owner}/{repo}', + issue_handler.get_clone_url(), f'{output_dir}/repo', ] ).decode('utf-8') @@ -453,6 +505,7 @@ async def resolve_issue( output = await process_issue( issue, + platform, base_commit, max_iterations, llm_config, @@ -480,24 +533,24 @@ def int_or_none(value): else: return int(value) - parser = argparse.ArgumentParser(description='Resolve a single issue from Github.') + parser = argparse.ArgumentParser(description='Resolve a single issue.') parser.add_argument( '--repo', type=str, required=True, - help='Github repository to resolve issues in form of `owner/repo`.', + help='repository to resolve issues in form of `owner/repo`.', ) parser.add_argument( '--token', type=str, default=None, - help='Github token to access the repository.', + help='token to access the repository.', ) parser.add_argument( '--username', type=str, default=None, - help='Github username to access the repository.', + help='username to access the repository.', ) parser.add_argument( '--runtime-container-image', @@ -581,14 +634,22 @@ def int_or_none(value): f'ghcr.io/all-hands-ai/runtime:{openhands.__version__}-nikolaik' ) - owner, repo = my_args.repo.split('/') - token = my_args.token if my_args.token else os.getenv('GITHUB_TOKEN') - username = my_args.username if my_args.username else os.getenv('GITHUB_USERNAME') + parts = my_args.repo.rsplit('/', 1) + if len(parts) < 2: + raise ValueError('Invalid repo name') + owner, repo = parts + + token = my_args.token or os.getenv('GITHUB_TOKEN') or os.getenv('GITLAB_TOKEN') + username = my_args.username if my_args.username else os.getenv('GIT_USERNAME') if not username: - raise ValueError('Github username is required.') + raise ValueError('Username is required.') if not token: - raise ValueError('Github token is required.') + raise ValueError('Token is required.') + + platform = identify_token(token) + if platform == Platform.INVALID: + raise ValueError('Token is invalid.') api_key = my_args.llm_api_key or os.environ['LLM_API_KEY'] llm_config = LLMConfig( @@ -624,6 +685,7 @@ def int_or_none(value): repo=repo, token=token, username=username, + platform=platform, runtime_container_image=runtime_container_image, max_iterations=my_args.max_iterations, output_dir=my_args.output_dir, diff --git a/openhands/resolver/resolver_output.py b/openhands/resolver/resolver_output.py index 7ae89e164250..9394783ff07c 100644 --- a/openhands/resolver/resolver_output.py +++ b/openhands/resolver/resolver_output.py @@ -2,12 +2,12 @@ from litellm import BaseModel -from openhands.resolver.github_issue import GithubIssue +from openhands.resolver.interfaces.issue import Issue class ResolverOutput(BaseModel): # NOTE: User-specified - issue: GithubIssue + issue: Issue issue_type: str instruction: str base_commit: str diff --git a/openhands/resolver/send_pull_request.py b/openhands/resolver/send_pull_request.py index 6b37502aaa4a..7cbe37cfcca0 100644 --- a/openhands/resolver/send_pull_request.py +++ b/openhands/resolver/send_pull_request.py @@ -5,18 +5,24 @@ import subprocess import jinja2 -import requests 
from openhands.core.config import LLMConfig from openhands.core.logger import openhands_logger as logger from openhands.llm.llm import LLM -from openhands.resolver.github_issue import GithubIssue +from openhands.resolver.interfaces.github import GithubIssueHandler +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ServiceContextIssue from openhands.resolver.io_utils import ( load_all_resolver_outputs, load_single_resolver_output, ) from openhands.resolver.patching import apply_diff, parse_patch from openhands.resolver.resolver_output import ResolverOutput +from openhands.resolver.utils import ( + Platform, + identify_token, +) def apply_patch(repo_dir: str, patch: str) -> None: @@ -153,7 +159,7 @@ def initialize_repo( return dest_dir -def make_commit(repo_dir: str, issue: GithubIssue, issue_type: str) -> None: +def make_commit(repo_dir: str, issue: Issue, issue_type: str) -> None: """Make a commit with the changes to the repository. Args: @@ -214,25 +220,11 @@ def make_commit(repo_dir: str, issue: GithubIssue, issue_type: str) -> None: raise RuntimeError(f'Failed to commit changes: {result}') -def branch_exists(base_url: str, branch_name: str, headers: dict) -> bool: - """Check if a branch exists in the GitHub repository. - - Args: - base_url: The base URL of the GitHub repository API - branch_name: The name of the branch to check - headers: The HTTP headers to use for authentication - """ - print(f'Checking if branch {branch_name} exists...') - response = requests.get(f'{base_url}/branches/{branch_name}', headers=headers) - exists = response.status_code == 200 - print(f'Branch {branch_name} exists: {exists}') - return exists - - def send_pull_request( - github_issue: GithubIssue, - github_token: str, - github_username: str | None, + issue: Issue, + token: str, + username: str | None, + platform: Platform, patch_dir: str, pr_type: str, fork_owner: str | None = None, @@ -241,53 +233,49 @@ def send_pull_request( reviewer: str | None = None, pr_title: str | None = None, ) -> str: - """Send a pull request to a GitHub repository. + """Send a pull request to a GitHub or Gitlab repository. Args: - github_issue: The issue to send the pull request for - github_token: The GitHub token to use for authentication - github_username: The GitHub username, if provided + issue: The issue to send the pull request for + token: The GitHub or Gitlab token to use for authentication + username: The GitHub or Gitlab username, if provided + platform: The platform of the repository. 
patch_dir: The directory containing the patches to apply pr_type: The type: branch (no PR created), draft or ready (regular PR created) fork_owner: The owner of the fork to push changes to (if different from the original repo owner) additional_message: The additional messages to post as a comment on the PR in json list format target_branch: The target branch to create the pull request against (defaults to repository default branch) - reviewer: The GitHub username of the reviewer to assign + reviewer: The GitHub or Gitlab username of the reviewer to assign pr_title: Custom title for the pull request (optional) """ if pr_type not in ['branch', 'draft', 'ready']: raise ValueError(f'Invalid pr_type: {pr_type}') - # Set up headers and base URL for GitHub API - headers = { - 'Authorization': f'token {github_token}', - 'Accept': 'application/vnd.github.v3+json', - } - base_url = f'https://api.github.com/repos/{github_issue.owner}/{github_issue.repo}' + handler = None + if platform == Platform.GITHUB: + handler = ServiceContextIssue( + GithubIssueHandler(issue.owner, issue.repo, token, username), None + ) + else: # platform == Platform.GITLAB + handler = ServiceContextIssue( + GitlabIssueHandler(issue.owner, issue.repo, token, username), None + ) # Create a new branch with a unique name - base_branch_name = f'openhands-fix-issue-{github_issue.number}' - branch_name = base_branch_name - attempt = 1 - - # Find a unique branch name - print('Checking if branch exists...') - while branch_exists(base_url, branch_name, headers): - attempt += 1 - branch_name = f'{base_branch_name}-try{attempt}' + base_branch_name = f'openhands-fix-issue-{issue.number}' + branch_name = handler.get_branch_name( + base_branch_name=base_branch_name, + ) # Get the default branch or use specified target branch print('Getting base branch...') if target_branch: base_branch = target_branch - # Verify the target branch exists - response = requests.get(f'{base_url}/branches/{target_branch}', headers=headers) - if response.status_code != 200: + exists = handler.branch_exists(branch_name=target_branch) + if not exists: raise ValueError(f'Target branch {target_branch} does not exist') else: - response = requests.get(f'{base_url}', headers=headers) - response.raise_for_status() - base_branch = response.json()['default_branch'] + base_branch = handler.get_default_branch_name() print(f'Base branch: {base_branch}') # Create and checkout the new branch @@ -304,16 +292,12 @@ def send_pull_request( ) # Determine the repository to push to (original or fork) - push_owner = fork_owner if fork_owner else github_issue.owner - push_repo = github_issue.repo + push_owner = fork_owner if fork_owner else issue.owner + + handler._strategy.set_owner(push_owner) print('Pushing changes...') - username_and_token = ( - f'{github_username}:{github_token}' - if github_username - else f'x-auth-token:{github_token}' - ) - push_url = f'https://{username_and_token}@github.com/{push_owner}/{push_repo}.git' + push_url = handler.get_clone_url() result = subprocess.run( ['git', '-C', patch_dir, 'push', push_url, branch_name], capture_output=True, @@ -325,11 +309,9 @@ def send_pull_request( # Prepare the PR data: title and body final_pr_title = ( - pr_title - if pr_title - else f'Fix issue #{github_issue.number}: {github_issue.title}' + pr_title if pr_title else f'Fix issue #{issue.number}: {issue.title}' ) - pr_body = f'This pull request fixes #{github_issue.number}.' + pr_body = f'This pull request fixes #{issue.number}.' 
if additional_message: pr_body += f'\n\n{additional_message}' pr_body += '\n\nAutomatic fix generated by [OpenHands](https://github.com/All-Hands-AI/OpenHands/) 🙌' @@ -337,41 +319,25 @@ def send_pull_request( # If we are not sending a PR, we can finish early and return the # URL for the user to open a PR manually if pr_type == 'branch': - url = f'https://github.com/{push_owner}/{github_issue.repo}/compare/{branch_name}?expand=1' + url = handler.get_compare_url(branch_name) else: # Prepare the PR for the GitHub API data = { - 'title': final_pr_title, # No need to escape title for GitHub API - 'body': pr_body, - 'head': branch_name, - 'base': base_branch, + 'title': final_pr_title, + ('body' if platform == Platform.GITHUB else 'description'): pr_body, + ('head' if platform == Platform.GITHUB else 'source_branch'): branch_name, + ('base' if platform == Platform.GITHUB else 'target_branch'): base_branch, 'draft': pr_type == 'draft', } - # Send the PR and get its URL to tell the user - response = requests.post(f'{base_url}/pulls', headers=headers, json=data) - if response.status_code == 403: - raise RuntimeError( - 'Failed to create pull request due to missing permissions. ' - 'Make sure that the provided token has push permissions for the repository.' - ) - response.raise_for_status() - pr_data = response.json() + pr_data = handler.create_pull_request(data) + url = pr_data['html_url'] + print(pr_data) # Request review if a reviewer was specified if reviewer and pr_type != 'branch': - review_data = {'reviewers': [reviewer]} - review_response = requests.post( - f'{base_url}/pulls/{pr_data["number"]}/requested_reviewers', - headers=headers, - json=review_data, - ) - if review_response.status_code != 201: - print( - f'Warning: Failed to request review from {reviewer}: {review_response.text}' - ) - - url = pr_data['html_url'] + number = pr_data['number'] + handler.request_reviewers(reviewer, number) print( f'{pr_type} created: {url}\n\n--- Title: {final_pr_title}\n\n--- Body:\n{pr_body}' @@ -380,74 +346,11 @@ def send_pull_request( return url -def reply_to_comment(github_token: str, comment_id: str, reply: str): - """Reply to a comment on a GitHub issue or pull request. - - Args: - github_token: The GitHub token to use for authentication - comment_id: The ID of the comment to reply to - reply: The reply message to post - """ - # Opting for graphql as REST API doesn't allow reply to replies in comment threads - query = """ - mutation($body: String!, $pullRequestReviewThreadId: ID!) { - addPullRequestReviewThreadReply(input: { body: $body, pullRequestReviewThreadId: $pullRequestReviewThreadId }) { - comment { - id - body - createdAt - } - } - } - """ - - # Prepare the reply to the comment - comment_reply = f'Openhands fix success summary\n\n\n{reply}' - variables = {'body': comment_reply, 'pullRequestReviewThreadId': comment_id} - url = 'https://api.github.com/graphql' - headers = { - 'Authorization': f'Bearer {github_token}', - 'Content-Type': 'application/json', - } - - # Send the reply to the comment - response = requests.post( - url, json={'query': query, 'variables': variables}, headers=headers - ) - response.raise_for_status() - - -def send_comment_msg(base_url: str, issue_number: int, github_token: str, msg: str): - """Send a comment message to a GitHub issue or pull request. 
- - Args: - base_url: The base URL of the GitHub repository API - issue_number: The issue or pull request number - github_token: The GitHub token to use for authentication - msg: The message content to post as a comment - """ - # Set up headers for GitHub API - headers = { - 'Authorization': f'token {github_token}', - 'Accept': 'application/vnd.github.v3+json', - } - - # Post a comment on the PR - comment_url = f'{base_url}/issues/{issue_number}/comments' - comment_data = {'body': msg} - comment_response = requests.post(comment_url, headers=headers, json=comment_data) - if comment_response.status_code != 201: - print( - f'Failed to post comment: {comment_response.status_code} {comment_response.text}' - ) - else: - print(f'Comment added to the PR: {msg}') - - def update_existing_pull_request( - github_issue: GithubIssue, - github_token: str, - github_username: str | None, + issue: Issue, + token: str, + username: str | None, + platform: Platform, patch_dir: str, llm_config: LLMConfig, comment_message: str | None = None, @@ -456,23 +359,34 @@ def update_existing_pull_request( """Update an existing pull request with the new patches. Args: - github_issue: The issue to update. - github_token: The GitHub token to use for authentication. - github_username: The GitHub username to use for authentication. + issue: The issue to update. + token: The token to use for authentication. + username: The username to use for authentication. + platform: The platform of the repository. patch_dir: The directory containing the patches to apply. llm_config: The LLM configuration to use for summarizing changes. comment_message: The main message to post as a comment on the PR. additional_message: The additional messages to post as a comment on the PR in json list format. """ - # Set up base URL for GitHub API - base_url = f'https://api.github.com/repos/{github_issue.owner}/{github_issue.repo}' - branch_name = github_issue.head_branch + # Set up headers and base URL for GitHub or GitLab API + + handler = None + if platform == Platform.GITHUB: + handler = ServiceContextIssue( + GithubIssueHandler(issue.owner, issue.repo, token, username), llm_config + ) + else: # platform == Platform.GITLAB + handler = ServiceContextIssue( + GitlabIssueHandler(issue.owner, issue.repo, token, username), llm_config + ) + + branch_name = issue.head_branch # Prepare the push command push_command = ( f'git -C {patch_dir} push ' - f'https://{github_username}:{github_token}@github.com/' - f'{github_issue.owner}/{github_issue.repo}.git {branch_name}' + f'{handler.get_authorize_url()}' + f'{issue.owner}/{issue.repo}.git {branch_name}' ) # Push the changes to the existing branch @@ -481,7 +395,7 @@ def update_existing_pull_request( print(f'Error pushing changes: {result.stderr}') raise RuntimeError('Failed to push changes to the remote repository') - pr_url = f'https://github.com/{github_issue.owner}/{github_issue.repo}/pull/{github_issue.number}' + pr_url = handler.get_pull_url(issue.number) print(f'Updated pull request {pr_url} with new patches.') # Generate a summary of all comment success indicators for PR message @@ -517,18 +431,18 @@ def update_existing_pull_request( # Post a comment on the PR if comment_message: - send_comment_msg(base_url, github_issue.number, github_token, comment_message) + handler.send_comment_msg(issue.number, comment_message) # Reply to each unresolved comment thread - if additional_message and github_issue.thread_ids: + if additional_message and issue.thread_ids: try: explanations = json.loads(additional_message) 
for count, reply_comment in enumerate(explanations): - comment_id = github_issue.thread_ids[count] - reply_to_comment(github_token, comment_id, reply_comment) + comment_id = issue.thread_ids[count] + handler.reply_to_comment(issue.number, comment_id, reply_comment) except (json.JSONDecodeError, TypeError): msg = f'Error occured when replying to threads; success explanations {additional_message}' - send_comment_msg(base_url, github_issue.number, github_token, msg) + handler.send_comment_msg(issue.number, msg) return pr_url @@ -536,8 +450,9 @@ def update_existing_pull_request( def process_single_issue( output_dir: str, resolver_output: ResolverOutput, - github_token: str, - github_username: str, + token: str, + username: str, + platform: Platform, pr_type: str, llm_config: LLMConfig, fork_owner: str | None, @@ -577,18 +492,20 @@ def process_single_issue( if issue_type == 'pr': update_existing_pull_request( - github_issue=resolver_output.issue, - github_token=github_token, - github_username=github_username, + issue=resolver_output.issue, + token=token, + username=username, + platform=platform, patch_dir=patched_repo_dir, additional_message=resolver_output.result_explanation, llm_config=llm_config, ) else: send_pull_request( - github_issue=resolver_output.issue, - github_token=github_token, - github_username=github_username, + issue=resolver_output.issue, + token=token, + username=username, + platform=platform, patch_dir=patched_repo_dir, pr_type=pr_type, fork_owner=fork_owner, @@ -601,8 +518,9 @@ def process_single_issue( def process_all_successful_issues( output_dir: str, - github_token: str, - github_username: str, + token: str, + username: str, + platform: Platform, pr_type: str, llm_config: LLMConfig, fork_owner: str | None, @@ -614,8 +532,9 @@ def process_all_successful_issues( process_single_issue( output_dir, resolver_output, - github_token, - github_username, + token, + username, + platform, pr_type, llm_config, fork_owner, @@ -625,18 +544,20 @@ def process_all_successful_issues( def main(): - parser = argparse.ArgumentParser(description='Send a pull request to Github.') + parser = argparse.ArgumentParser( + description='Send a pull request to Github or Gitlab.' + ) parser.add_argument( - '--github-token', + '--token', type=str, default=None, - help='Github token to access the repository.', + help='token to access the repository.', ) parser.add_argument( - '--github-username', + '--username', type=str, default=None, - help='Github username to access the repository.', + help='username to access the repository.', ) parser.add_argument( '--output-dir', @@ -695,7 +616,7 @@ def main(): parser.add_argument( '--reviewer', type=str, - help='GitHub username of the person to request review from', + help='GitHub or GitLab username of the person to request review from', default=None, ) parser.add_argument( @@ -706,18 +627,16 @@ def main(): ) my_args = parser.parse_args() - github_token = ( - my_args.github_token if my_args.github_token else os.getenv('GITHUB_TOKEN') - ) - if not github_token: + token = my_args.token or os.getenv('GITHUB_TOKEN') or os.getenv('GITLAB_TOKEN') + if not token: raise ValueError( - 'Github token is not set, set via --github-token or GITHUB_TOKEN environment variable.' + 'token is not set, set via --token or GITHUB_TOKEN or GITLAB_TOKEN environment variable.' 
)
-    github_username = (
-        my_args.github_username
-        if my_args.github_username
-        else os.getenv('GITHUB_USERNAME')
-    )
+    username = my_args.username if my_args.username else os.getenv('GIT_USERNAME')
+
+    platform = identify_token(token)
+    if platform == Platform.INVALID:
+        raise ValueError('Token is invalid.')
 
     api_key = my_args.llm_api_key or os.environ['LLM_API_KEY']
     llm_config = LLMConfig(
@@ -730,12 +649,13 @@ def main():
         raise ValueError(f'Output directory {my_args.output_dir} does not exist.')
 
     if my_args.issue_number == 'all_successful':
-        if not github_username:
-            raise ValueError('Github username is required.')
+        if not username:
+            raise ValueError('Username is required.')
         process_all_successful_issues(
             my_args.output_dir,
-            github_token,
-            github_username,
+            token,
+            username,
+            platform,
             my_args.pr_type,
             llm_config,
             my_args.fork_owner,
@@ -746,13 +666,14 @@ def main():
         issue_number = int(my_args.issue_number)
         output_path = os.path.join(my_args.output_dir, 'output.jsonl')
         resolver_output = load_single_resolver_output(output_path, issue_number)
-        if not github_username:
-            raise ValueError('Github username is required.')
+        if not username:
+            raise ValueError('Username is required.')
         process_single_issue(
             my_args.output_dir,
             resolver_output,
-            github_token,
-            github_username,
+            token,
+            username,
+            platform,
             my_args.pr_type,
             llm_config,
             my_args.fork_owner,
diff --git a/openhands/resolver/utils.py b/openhands/resolver/utils.py
index 583026455945..b0e25861ccb7 100644
--- a/openhands/resolver/utils.py
+++ b/openhands/resolver/utils.py
@@ -2,9 +2,12 @@
 import logging
 import multiprocessing as mp
 import os
+import re
+from enum import Enum
 from typing import Callable
 
 import pandas as pd
+import requests
 
 from openhands.controller.state.state import State
 from openhands.core.logger import get_console_handler
@@ -13,6 +16,47 @@
 from openhands.events.action.message import MessageAction
 
 
+class Platform(Enum):
+    INVALID = 0
+    GITHUB = 1
+    GITLAB = 2
+
+
+def identify_token(token: str) -> Platform:
+    """
+    Identifies whether a token belongs to GitHub or GitLab.
+
+    Parameters:
+        token (str): The personal access token to check.
+
+    Returns:
+        Platform: Platform.GITHUB if the token is valid for GitHub,
+                  Platform.GITLAB if the token is valid for GitLab,
+                  Platform.INVALID if the token is not recognized by either. 
+ """ + github_url = 'https://api.github.com/user' + github_headers = {'Authorization': f'token {token}'} + + try: + github_response = requests.get(github_url, headers=github_headers, timeout=5) + if github_response.status_code == 200: + return Platform.GITHUB + except requests.RequestException as e: + print(f'Error connecting to GitHub API: {e}') + + gitlab_url = 'https://gitlab.com/api/v4/user' + gitlab_headers = {'Authorization': f'Bearer {token}'} + + try: + gitlab_response = requests.get(gitlab_url, headers=gitlab_headers, timeout=5) + if gitlab_response.status_code == 200: + return Platform.GITLAB + except requests.RequestException as e: + print(f'Error connecting to GitLab API: {e}') + + return Platform.INVALID + + def codeact_user_response( state: State, encapsulate_solution: bool = False, @@ -137,3 +181,45 @@ def reset_logger_for_multiprocessing( logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') ) logger.addHandler(file_handler) + + +def extract_image_urls(issue_body: str) -> list[str]: + # Regular expression to match Markdown image syntax ![alt text](image_url) + image_pattern = r'!\[.*?\]\((https?://[^\s)]+)\)' + return re.findall(image_pattern, issue_body) + + +def extract_issue_references(body: str) -> list[int]: + # First, remove code blocks as they may contain false positives + body = re.sub(r'```.*?```', '', body, flags=re.DOTALL) + + # Remove inline code + body = re.sub(r'`[^`]*`', '', body) + + # Remove URLs that contain hash symbols + body = re.sub(r'https?://[^\s)]*#\d+[^\s)]*', '', body) + + # Now extract issue numbers, making sure they're not part of other text + # The pattern matches #number that: + # 1. Is at the start of text or after whitespace/punctuation + # 2. Is followed by whitespace, punctuation, or end of text + # 3. 
Is not part of a URL + pattern = r'(?:^|[\s\[({]|[^\w#])#(\d+)(?=[\s,.\])}]|$)' + return [int(match) for match in re.findall(pattern, body)] + + +def get_unique_uid(start_uid=1000): + existing_uids = set() + with open('/etc/passwd', 'r') as passwd_file: + for line in passwd_file: + parts = line.split(':') + if len(parts) > 2: + try: + existing_uids.add(int(parts[2])) + except ValueError: + continue + + while start_uid in existing_uids: + start_uid += 1 + + return start_uid diff --git a/tests/unit/resolver/test_guess_success.py b/tests/unit/resolver/github/test_guess_success.py similarity index 88% rename from tests/unit/resolver/test_guess_success.py rename to tests/unit/resolver/github/test_guess_success.py index 5f0feef8d110..bef1e1f49bcf 100644 --- a/tests/unit/resolver/test_guess_success.py +++ b/tests/unit/resolver/github/test_guess_success.py @@ -4,13 +4,17 @@ from openhands.core.config import LLMConfig from openhands.events.action.message import MessageAction from openhands.llm import LLM -from openhands.resolver.github_issue import GithubIssue -from openhands.resolver.issue_definitions import IssueHandler, PRHandler +from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) def test_guess_success_multiline_explanation(): # Mock data - issue = GithubIssue( + issue = Issue( owner='test', repo='test', number=1, @@ -44,7 +48,9 @@ def test_guess_success_multiline_explanation(): # Use patch to mock the LLM completion call with patch.object(LLM, 'completion', return_value=mock_response) as mock_completion: # Create a handler instance - handler = IssueHandler('test', 'test', 'test', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('test', 'test', 'test'), llm_config + ) # Call guess_success success, _, explanation = handler.guess_success(issue, history) @@ -64,10 +70,10 @@ def test_guess_success_multiline_explanation(): def test_pr_handler_guess_success_with_thread_comments(): # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR(GithubPRHandler('test', 'test', 'test'), llm_config) # Create a mock issue with thread comments but no review comments - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -114,10 +120,12 @@ def test_pr_handler_guess_success_with_thread_comments(): def test_pr_handler_guess_success_only_review_comments(): # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create a mock issue with only review comments - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -165,10 +173,10 @@ def test_pr_handler_guess_success_only_review_comments(): def test_pr_handler_guess_success_no_comments(): # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR(GithubPRHandler('test', 'test', 'test'), llm_config) # Create a mock issue with no comments - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, diff 
--git a/tests/unit/resolver/test_issue_handler.py b/tests/unit/resolver/github/test_issue_handler.py similarity index 94% rename from tests/unit/resolver/test_issue_handler.py rename to tests/unit/resolver/github/test_issue_handler.py index 56f012fd77c3..4d21e5de696a 100644 --- a/tests/unit/resolver/test_issue_handler.py +++ b/tests/unit/resolver/github/test_issue_handler.py @@ -1,8 +1,12 @@ from unittest.mock import MagicMock, patch from openhands.core.config import LLMConfig -from openhands.resolver.github_issue import ReviewThread -from openhands.resolver.issue_definitions import IssueHandler, PRHandler +from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler +from openhands.resolver.interfaces.issue import ReviewThread +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) def test_get_converted_issues_initializes_review_comments(): @@ -27,7 +31,9 @@ def test_get_converted_issues_initializes_review_comments(): # Create an instance of IssueHandler llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues issues = handler.get_converted_issues(issue_numbers=[1]) @@ -57,7 +63,6 @@ def test_get_converted_issues_handles_empty_body(): # Mock the response for comments mock_comments_response = MagicMock() mock_comments_response.json.return_value = [] - # Set up the mock to return different responses mock_get.side_effect = [ mock_issues_response, @@ -67,7 +72,9 @@ def test_get_converted_issues_handles_empty_body(): # Create an instance of IssueHandler llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues issues = handler.get_converted_issues(issue_numbers=[1]) @@ -148,7 +155,9 @@ def test_pr_handler_get_converted_issues_with_comments(): # Create an instance of PRHandler llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues prs = handler.get_converted_issues(issue_numbers=[1]) @@ -185,10 +194,12 @@ def test_get_issue_comments_with_specific_comment_id(): # Create an instance of IssueHandler llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get comments with a specific comment_id - specific_comment = handler._get_issue_comments(issue_number=1, comment_id=123) + specific_comment = handler.get_issue_comments(issue_number=1, comment_id=123) # Verify only the specific comment is returned assert specific_comment == ['First comment'] @@ -273,7 +284,9 @@ def test_pr_handler_get_converted_issues_with_specific_thread_comment(): # Create an instance of PRHandler llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues prs = 
handler.get_converted_issues( @@ -376,7 +389,9 @@ def test_pr_handler_get_converted_issues_with_specific_review_thread_comment(): # Create an instance of PRHandler llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues prs = handler.get_converted_issues( @@ -499,7 +514,9 @@ def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs(): # Create an instance of PRHandler llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues prs = handler.get_converted_issues( @@ -599,7 +616,9 @@ def test_pr_handler_get_converted_issues_with_duplicate_issue_refs(): # Create an instance of PRHandler llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Get converted issues prs = handler.get_converted_issues(issue_numbers=[1]) diff --git a/tests/unit/resolver/test_issue_handler_error_handling.py b/tests/unit/resolver/github/test_issue_handler_error_handling.py similarity index 86% rename from tests/unit/resolver/test_issue_handler_error_handling.py rename to tests/unit/resolver/github/test_issue_handler_error_handling.py index 93a98437168e..51e2fbb50728 100644 --- a/tests/unit/resolver/test_issue_handler_error_handling.py +++ b/tests/unit/resolver/github/test_issue_handler_error_handling.py @@ -7,8 +7,12 @@ from openhands.core.config import LLMConfig from openhands.events.action.message import MessageAction from openhands.llm.llm import LLM -from openhands.resolver.github_issue import GithubIssue -from openhands.resolver.issue_definitions import IssueHandler, PRHandler +from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) @pytest.fixture(autouse=True) @@ -33,7 +37,9 @@ def default_config(): def test_handle_nonexistent_issue_reference(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Mock the requests.get to simulate a 404 error mock_response = MagicMock() @@ -43,7 +49,7 @@ def test_handle_nonexistent_issue_reference(): with patch('requests.get', return_value=mock_response): # Call the method with a non-existent issue reference - result = handler._PRHandler__get_context_from_external_issues_references( + result = handler._strategy.get_context_from_external_issues_references( closing_issues=[], closing_issue_numbers=[], issue_body='This references #999999', # Non-existent issue @@ -58,7 +64,9 @@ def test_handle_nonexistent_issue_reference(): def test_handle_rate_limit_error(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Mock the requests.get to simulate a 
rate limit error mock_response = MagicMock() @@ -68,7 +76,7 @@ def test_handle_rate_limit_error(): with patch('requests.get', return_value=mock_response): # Call the method with an issue reference - result = handler._PRHandler__get_context_from_external_issues_references( + result = handler._strategy.get_context_from_external_issues_references( closing_issues=[], closing_issue_numbers=[], issue_body='This references #123', @@ -83,14 +91,16 @@ def test_handle_rate_limit_error(): def test_handle_network_error(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Mock the requests.get to simulate a network error with patch( 'requests.get', side_effect=requests.exceptions.ConnectionError('Network Error') ): # Call the method with an issue reference - result = handler._PRHandler__get_context_from_external_issues_references( + result = handler._strategy.get_context_from_external_issues_references( closing_issues=[], closing_issue_numbers=[], issue_body='This references #123', @@ -105,7 +115,9 @@ def test_handle_network_error(): def test_successful_issue_reference(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Mock a successful response mock_response = MagicMock() @@ -114,7 +126,7 @@ def test_successful_issue_reference(): with patch('requests.get', return_value=mock_response): # Call the method with an issue reference - result = handler._PRHandler__get_context_from_external_issues_references( + result = handler._strategy.get_context_from_external_issues_references( closing_issues=[], closing_issue_numbers=[], issue_body='This references #123', @@ -201,11 +213,13 @@ def test_guess_success_rate_limit_wait_time(mock_litellm_completion, default_con ] llm = LLM(config=default_config) - handler = IssueHandler('test-owner', 'test-repo', 'test-token', default_config) + handler = ServiceContextIssue( + GithubIssueHandler('test-owner', 'test-repo', 'test-token'), default_config + ) handler.llm = llm # Mock issue and history - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -241,11 +255,13 @@ def test_guess_success_exhausts_retries(mock_completion, default_config): # Initialize LLM and handler llm = LLM(config=default_config) - handler = PRHandler('test-owner', 'test-repo', 'test-token', default_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), default_config + ) handler.llm = llm # Mock issue and history - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, diff --git a/tests/unit/resolver/test_pr_handler_guess_success.py b/tests/unit/resolver/github/test_pr_handler_guess_success.py similarity index 92% rename from tests/unit/resolver/test_pr_handler_guess_success.py rename to tests/unit/resolver/github/test_pr_handler_guess_success.py index c8e6bbe62c09..e94b0bdeb9f0 100644 --- a/tests/unit/resolver/test_pr_handler_guess_success.py +++ b/tests/unit/resolver/github/test_pr_handler_guess_success.py @@ -6,14 +6,18 @@ from openhands.core.config import LLMConfig from openhands.events.action.message import MessageAction from openhands.llm.llm import LLM -from openhands.resolver.github_issue import GithubIssue, ReviewThread 
-from openhands.resolver.issue_definitions import PRHandler +from openhands.resolver.interfaces.github import GithubPRHandler +from openhands.resolver.interfaces.issue import Issue, ReviewThread +from openhands.resolver.interfaces.issue_definitions import ServiceContextPR @pytest.fixture def pr_handler(): llm_config = LLMConfig(model='test-model') - return PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + return handler @pytest.fixture @@ -37,10 +41,12 @@ def test_guess_success_review_threads_litellm_call(): """Test that the completion() call for review threads contains the expected content.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create a mock issue with review threads - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -142,10 +148,12 @@ def test_guess_success_thread_comments_litellm_call(): """Test that the completion() call for thread comments contains the expected content.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create a mock issue with thread comments - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -215,7 +223,9 @@ def test_check_feedback_with_llm(): """Test the _check_feedback_with_llm helper function.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Test cases for different LLM responses test_cases = [ @@ -255,7 +265,9 @@ def test_check_review_thread_with_git_patch(): """Test that git patch from complete_runtime is included in the prompt.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data review_thread = ReviewThread( @@ -312,7 +324,9 @@ def test_check_review_thread(): """Test the _check_review_thread helper function.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data review_thread = ReviewThread( @@ -367,7 +381,9 @@ def test_check_thread_comments_with_git_patch(): """Test that git patch from complete_runtime is included in the prompt.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data thread_comments = [ @@ -422,7 +438,9 @@ def test_check_thread_comments(): """Test the _check_thread_comments helper function.""" # Create a PR 
handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data thread_comments = [ @@ -475,7 +493,9 @@ def test_check_review_comments_with_git_patch(): """Test that git patch from complete_runtime is included in the prompt.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data review_comments = [ @@ -530,7 +550,9 @@ def test_check_review_comments(): """Test the _check_review_comments helper function.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create test data review_comments = [ @@ -583,10 +605,12 @@ def test_guess_success_review_comments_litellm_call(): """Test that the completion() call for review comments contains the expected content.""" # Create a PR handler instance llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config) + handler = ServiceContextPR( + GithubPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) # Create a mock issue with review comments - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -627,7 +651,6 @@ def test_guess_success_review_comments_litellm_call(): ) ] - # Test the guess_success method with patch.object(LLM, 'completion') as mock_completion: mock_completion.return_value = mock_response success, success_list, explanation = handler.guess_success(issue, history) diff --git a/tests/unit/resolver/test_pr_title_escaping.py b/tests/unit/resolver/github/test_pr_title_escaping.py similarity index 93% rename from tests/unit/resolver/test_pr_title_escaping.py rename to tests/unit/resolver/github/test_pr_title_escaping.py index 9cc5d90bc4b0..336d1522781c 100644 --- a/tests/unit/resolver/test_pr_title_escaping.py +++ b/tests/unit/resolver/github/test_pr_title_escaping.py @@ -2,8 +2,9 @@ import subprocess import tempfile -from openhands.resolver.github_issue import GithubIssue +from openhands.resolver.interfaces.issue import Issue from openhands.resolver.send_pull_request import make_commit +from openhands.resolver.utils import Platform def test_commit_message_with_quotes(): @@ -19,7 +20,7 @@ def test_commit_message_with_quotes(): subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True) # Create a test issue with problematic title - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=123, @@ -89,7 +90,7 @@ def raise_for_status(self): monkeypatch.setattr('requests.post', mock_post) monkeypatch.setattr('requests.get', lambda *args, **kwargs: MockGetResponse()) monkeypatch.setattr( - 'openhands.resolver.send_pull_request.branch_exists', + 'openhands.resolver.interfaces.github.GithubIssueHandler.branch_exists', lambda *args, **kwargs: False, ) @@ -135,7 +136,7 @@ def mock_run(*args, **kwargs): # Create a test issue with problematic title print('Creating test issue...') - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=123, 
@@ -156,9 +157,10 @@ def mock_run(*args, **kwargs): from openhands.resolver.send_pull_request import send_pull_request send_pull_request( - github_issue=issue, - github_token='dummy-token', - github_username='test-user', + issue=issue, + token='dummy-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=temp_dir, pr_type='ready', ) diff --git a/tests/unit/resolver/test_resolve_issues.py b/tests/unit/resolver/github/test_resolve_issues.py similarity index 93% rename from tests/unit/resolver/test_resolve_issues.py rename to tests/unit/resolver/github/test_resolve_issues.py index fcc12f1d0698..d46ddf732fb4 100644 --- a/tests/unit/resolver/test_resolve_issues.py +++ b/tests/unit/resolver/github/test_resolve_issues.py @@ -12,14 +12,19 @@ NullObservation, ) from openhands.llm.llm import LLM -from openhands.resolver.github_issue import GithubIssue, ReviewThread -from openhands.resolver.issue_definitions import IssueHandler, PRHandler +from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler +from openhands.resolver.interfaces.issue import Issue, ReviewThread +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) from openhands.resolver.resolve_issue import ( complete_runtime, initialize_runtime, process_issue, ) from openhands.resolver.resolver_output import ResolverOutput +from openhands.resolver.utils import Platform @pytest.fixture @@ -76,7 +81,7 @@ def test_initialize_runtime(): ), ] - initialize_runtime(mock_runtime) + initialize_runtime(mock_runtime, Platform.GITHUB) assert mock_runtime.run_action.call_count == 2 mock_runtime.run_action.assert_any_call(CmdRunAction(command='cd /workspace')) @@ -103,6 +108,7 @@ async def test_resolve_issue_no_issues_found(): repo='test-repo', token='test-token', username='test-user', + platform=Platform.GITHUB, max_iterations=5, output_dir='/tmp', llm_config=LLMConfig(model='test', api_key='test'), @@ -122,7 +128,9 @@ async def test_resolve_issue_no_issues_found(): def test_download_issues_from_github(): llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('owner', 'repo', 'token', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), llm_config + ) mock_issues_response = MagicMock() mock_issues_response.json.side_effect = [ @@ -154,7 +162,7 @@ def get_mock_response(url, *args, **kwargs): assert len(issues) == 2 assert handler.issue_type == 'issue' - assert all(isinstance(issue, GithubIssue) for issue in issues) + assert all(isinstance(issue, Issue) for issue in issues) assert [issue.number for issue in issues] == [1, 3] assert [issue.title for issue in issues] == ['Issue 1', 'Issue 2'] assert [issue.review_comments for issue in issues] == [None, None] @@ -164,7 +172,7 @@ def get_mock_response(url, *args, **kwargs): def test_download_pr_from_github(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('owner', 'repo', 'token', llm_config) + handler = ServiceContextPR(GithubPRHandler('owner', 'repo', 'token'), llm_config) mock_pr_response = MagicMock() mock_pr_response.json.side_effect = [ [ @@ -268,7 +276,7 @@ def get_mock_response(url, *args, **kwargs): assert len(issues) == 3 assert handler.issue_type == 'pr' - assert all(isinstance(issue, GithubIssue) for issue in issues) + assert all(isinstance(issue, Issue) for issue in issues) assert [issue.number for issue in issues] == [1, 2, 3] assert [issue.title for issue in issues] == ['PR 1', 'My PR', 'PR 3'] assert 
[issue.head_branch for issue in issues] == ['b1', 'b2', 'b3'] @@ -307,7 +315,7 @@ async def test_complete_runtime(): create_cmd_output(exit_code=0, content='git diff content', command='git apply'), ] - result = await complete_runtime(mock_runtime, 'base_commit_hash') + result = await complete_runtime(mock_runtime, 'base_commit_hash', Platform.GITHUB) assert result == {'git_patch': 'git diff content'} assert mock_runtime.run_action.call_count == 5 @@ -323,7 +331,7 @@ async def test_process_issue(mock_output_dir, mock_prompt_template): handler_instance = MagicMock() # Set up test data - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -434,6 +442,7 @@ async def test_process_issue(mock_output_dir, mock_prompt_template): # Call the function result = await process_issue( issue, + Platform.GITHUB, base_commit, max_iterations, llm_config, @@ -470,7 +479,7 @@ async def test_process_issue(mock_output_dir, mock_prompt_template): def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=123, @@ -478,7 +487,9 @@ def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): body='This is a test issue refer to image ![First Image](https://sampleimage.com/image1.png)', ) mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) instruction, images_urls = issue_handler.get_instruction( issue, mock_prompt_template, None ) @@ -488,7 +499,7 @@ def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): assert issue_handler.issue_type == 'issue' assert instruction == expected_instruction - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=123, @@ -506,7 +517,9 @@ def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): ], ) - pr_handler = PRHandler('owner', 'repo', 'token', mock_llm_config) + pr_handler = ServiceContextPR( + GithubPRHandler('owner', 'repo', 'token'), mock_llm_config + ) instruction, images_urls = pr_handler.get_instruction( issue, mock_followup_prompt_template, None ) @@ -518,7 +531,7 @@ def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): def test_file_instruction(): - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=123, @@ -530,7 +543,9 @@ def test_file_instruction(): prompt = f.read() # Test without thread comments mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) instruction, images_urls = issue_handler.get_instruction(issue, prompt, None) expected_instruction = """Please fix the following issue for the repository in /workspace. An environment has been set up for you to start working. You may assume all necessary tools are installed. 
@@ -550,7 +565,7 @@ def test_file_instruction(): def test_file_instruction_with_repo_instruction(): - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=123, @@ -568,7 +583,9 @@ def test_file_instruction_with_repo_instruction(): repo_instruction = f.read() mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) instruction, image_urls = issue_handler.get_instruction( issue, prompt, repo_instruction ) @@ -597,7 +614,7 @@ def test_file_instruction_with_repo_instruction(): def test_guess_success(): - mock_issue = GithubIssue( + mock_issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -615,7 +632,9 @@ def test_guess_success(): ) ) ] - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) with patch.object( LLM, 'completion', MagicMock(return_value=mock_completion_response) @@ -630,7 +649,7 @@ def test_guess_success(): def test_guess_success_with_thread_comments(): - mock_issue = GithubIssue( + mock_issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -653,7 +672,9 @@ def test_guess_success_with_thread_comments(): ) ) ] - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) with patch.object( LLM, 'completion', MagicMock(return_value=mock_completion_response) @@ -669,7 +690,7 @@ def test_guess_success_with_thread_comments(): def test_instruction_with_thread_comments(): # Create an issue with thread comments - issue = GithubIssue( + issue = Issue( owner='test_owner', repo='test_repo', number=123, @@ -687,7 +708,9 @@ def test_instruction_with_thread_comments(): prompt = f.read() llm_config = LLMConfig(model='test', api_key='test') - issue_handler = IssueHandler('owner', 'repo', 'token', llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), llm_config + ) instruction, images_urls = issue_handler.get_instruction(issue, prompt, None) # Verify that thread comments are included in the instruction @@ -699,7 +722,7 @@ def test_instruction_with_thread_comments(): def test_guess_success_failure(): - mock_issue = GithubIssue( + mock_issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -722,7 +745,9 @@ def test_guess_success_failure(): ) ) ] - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) with patch.object( LLM, 'completion', MagicMock(return_value=mock_completion_response) @@ -737,7 +762,7 @@ def test_guess_success_failure(): def test_guess_success_negative_case(): - mock_issue = GithubIssue( + mock_issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -755,7 +780,9 @@ def test_guess_success_negative_case(): ) ) ] - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) with patch.object( LLM, 'completion', MagicMock(return_value=mock_completion_response) @@ -770,7 +797,7 @@ def test_guess_success_negative_case(): def test_guess_success_invalid_output(): - mock_issue = GithubIssue( + 
mock_issue = Issue( owner='test_owner', repo='test_repo', number=1, @@ -784,7 +811,9 @@ def test_guess_success_invalid_output(): mock_completion_response.choices = [ MagicMock(message=MagicMock(content='This is not a valid output')) ] - issue_handler = IssueHandler('owner', 'repo', 'token', mock_llm_config) + issue_handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) with patch.object( LLM, 'completion', MagicMock(return_value=mock_completion_response) @@ -803,7 +832,7 @@ def test_guess_success_invalid_output(): def test_download_pr_with_review_comments(): llm_config = LLMConfig(model='test', api_key='test') - handler = PRHandler('owner', 'repo', 'token', llm_config) + handler = ServiceContextPR(GithubPRHandler('owner', 'repo', 'token'), llm_config) mock_pr_response = MagicMock() mock_pr_response.json.side_effect = [ [ @@ -854,7 +883,7 @@ def get_mock_response(url, *args, **kwargs): assert len(issues) == 1 assert handler.issue_type == 'pr' - assert isinstance(issues[0], GithubIssue) + assert isinstance(issues[0], Issue) assert issues[0].number == 1 assert issues[0].title == 'PR 1' assert issues[0].head_branch == 'b1' @@ -870,7 +899,9 @@ def get_mock_response(url, *args, **kwargs): def test_download_issue_with_specific_comment(): llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('owner', 'repo', 'token', llm_config) + handler = ServiceContextIssue( + GithubIssueHandler('owner', 'repo', 'token'), llm_config + ) # Define the specific comment_id to filter specific_comment_id = 101 diff --git a/tests/unit/resolver/test_send_pull_request.py b/tests/unit/resolver/github/test_send_pull_request.py similarity index 89% rename from tests/unit/resolver/test_send_pull_request.py rename to tests/unit/resolver/github/test_send_pull_request.py index c03738cf9abf..62d5c5d8f4f2 100644 --- a/tests/unit/resolver/test_send_pull_request.py +++ b/tests/unit/resolver/github/test_send_pull_request.py @@ -5,8 +5,9 @@ import pytest from openhands.core.config import LLMConfig -from openhands.resolver.github_issue import ReviewThread -from openhands.resolver.resolver_output import GithubIssue, ResolverOutput +from openhands.resolver.interfaces.github import GithubIssueHandler +from openhands.resolver.interfaces.issue import ReviewThread +from openhands.resolver.resolver_output import Issue, ResolverOutput from openhands.resolver.send_pull_request import ( apply_patch, initialize_repo, @@ -14,10 +15,10 @@ make_commit, process_all_successful_issues, process_single_issue, - reply_to_comment, send_pull_request, update_existing_pull_request, ) +from openhands.resolver.utils import Platform @pytest.fixture @@ -36,8 +37,8 @@ def mock_output_dir(): @pytest.fixture -def mock_github_issue(): - return GithubIssue( +def mock_issue(): + return Issue( number=42, title='Test Issue', owner='test-owner', @@ -241,7 +242,7 @@ def test_initialize_repo(mock_output_dir): assert f.read() == 'hello world' -@patch('openhands.resolver.send_pull_request.reply_to_comment') +@patch('openhands.resolver.interfaces.github.GithubIssueHandler.reply_to_comment') @patch('requests.post') @patch('subprocess.run') @patch('openhands.resolver.send_pull_request.LLM') @@ -252,7 +253,7 @@ def test_update_existing_pull_request( mock_reply_to_comment, ): # Arrange: Set up test data - github_issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=1, @@ -261,8 +262,8 @@ def test_update_existing_pull_request( thread_ids=['comment1', 'comment2'], 
head_branch='test-branch', ) - github_token = 'test-token' - github_username = 'test-user' + token = 'test-token' + username = 'test-user' patch_dir = '/path/to/patch' additional_message = '["Fixed bug in function A", "Updated documentation for B"]' @@ -285,9 +286,10 @@ def test_update_existing_pull_request( # Act: Call the function without comment_message to test auto-generation result = update_existing_pull_request( - github_issue, - github_token, - github_username, + issue, + token, + username, + Platform.GITHUB, patch_dir, llm_config, comment_message=None, @@ -297,20 +299,20 @@ def test_update_existing_pull_request( # Assert: Check if the git push command was executed push_command = ( f'git -C {patch_dir} push ' - f'https://{github_username}:{github_token}@github.com/' - f'{github_issue.owner}/{github_issue.repo}.git {github_issue.head_branch}' + f'https://{username}:{token}@github.com/' + f'{issue.owner}/{issue.repo}.git {issue.head_branch}' ) mock_subprocess_run.assert_called_once_with( push_command, shell=True, capture_output=True, text=True ) # Assert: Check if the auto-generated comment was posted to the PR - comment_url = f'https://api.github.com/repos/{github_issue.owner}/{github_issue.repo}/issues/{github_issue.number}/comments' + comment_url = f'https://api.github.com/repos/{issue.owner}/{issue.repo}/issues/{issue.number}/comments' expected_comment = 'This is an issue resolution.' mock_requests_post.assert_called_once_with( comment_url, headers={ - 'Authorization': f'token {github_token}', + 'Authorization': f'token {token}', 'Accept': 'application/vnd.github.v3+json', }, json={'body': expected_comment}, @@ -319,15 +321,14 @@ def test_update_existing_pull_request( # Assert: Check if the reply_to_comment function was called for each thread ID mock_reply_to_comment.assert_has_calls( [ - call(github_token, 'comment1', 'Fixed bug in function A'), - call(github_token, 'comment2', 'Updated documentation for B'), + call(issue.number, 'comment1', 'Fixed bug in function A'), + call(issue.number, 'comment2', 'Updated documentation for B'), ] ) # Assert: Check the returned PR URL assert ( - result - == f'https://github.com/{github_issue.owner}/{github_issue.repo}/pull/{github_issue.number}' + result == f'https://github.com/{issue.owner}/{issue.repo}/pull/{issue.number}' ) @@ -351,7 +352,8 @@ def test_send_pull_request( mock_get, mock_post, mock_run, - mock_github_issue, + mock_issue, + mock_llm_config, mock_output_dir, pr_type, target_branch, @@ -383,9 +385,10 @@ def test_send_pull_request( # Call the function result = send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type=pr_type, target_branch=target_branch, @@ -441,7 +444,7 @@ def test_send_pull_request( @patch('requests.post') @patch('requests.get') def test_send_pull_request_with_reviewer( - mock_get, mock_post, mock_run, mock_github_issue, mock_output_dir + mock_get, mock_post, mock_run, mock_issue, mock_output_dir, mock_llm_config ): repo_path = os.path.join(mock_output_dir, 'repo') reviewer = 'test-reviewer' @@ -472,9 +475,10 @@ def test_send_pull_request_with_reviewer( # Call the function with reviewer result = send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', 
reviewer=reviewer, @@ -504,7 +508,7 @@ def test_send_pull_request_with_reviewer( @patch('requests.post') @patch('requests.get') def test_send_pull_request_target_branch_with_fork( - mock_get, mock_post, mock_run, mock_github_issue, mock_output_dir + mock_get, mock_post, mock_run, mock_issue, mock_output_dir ): """Test that target_branch works correctly when using a fork.""" repo_path = os.path.join(mock_output_dir, 'repo') @@ -528,10 +532,11 @@ def test_send_pull_request_target_branch_with_fork( ] # Call the function with fork_owner and target_branch - result = send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', fork_owner=fork_owner, @@ -540,27 +545,34 @@ def test_send_pull_request_target_branch_with_fork( # Assert API calls assert mock_get.call_count == 2 - + # Verify target branch was checked in original repo, not fork target_branch_check = mock_get.call_args_list[1] - assert target_branch_check[0][0] == f'https://api.github.com/repos/test-owner/test-repo/branches/{target_branch}' + assert ( + target_branch_check[0][0] + == f'https://api.github.com/repos/test-owner/test-repo/branches/{target_branch}' + ) # Check PR creation mock_post.assert_called_once() post_data = mock_post.call_args[1]['json'] assert post_data['base'] == target_branch # PR should target the specified branch - assert post_data['head'] == 'openhands-fix-issue-42' # Branch name should be standard + assert ( + post_data['head'] == 'openhands-fix-issue-42' + ) # Branch name should be standard # Check that push was to fork push_call = mock_run.call_args_list[1] - assert f'https://test-user:test-token@github.com/{fork_owner}/test-repo.git' in str(push_call) + assert f'https://test-user:test-token@github.com/{fork_owner}/test-repo.git' in str( + push_call + ) @patch('subprocess.run') @patch('requests.post') @patch('requests.get') def test_send_pull_request_target_branch_with_additional_message( - mock_get, mock_post, mock_run, mock_github_issue, mock_output_dir + mock_get, mock_post, mock_run, mock_issue, mock_output_dir ): """Test that target_branch works correctly with additional PR message.""" repo_path = os.path.join(mock_output_dir, 'repo') @@ -584,10 +596,11 @@ def test_send_pull_request_target_branch_with_additional_message( ] # Call the function with target_branch and additional_message - result = send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', target_branch=target_branch, @@ -607,7 +620,7 @@ def test_send_pull_request_target_branch_with_additional_message( @patch('requests.get') def test_send_pull_request_invalid_target_branch( - mock_get, mock_github_issue, mock_output_dir + mock_get, mock_issue, mock_output_dir, mock_llm_config ): """Test that an error is raised when specifying a non-existent target branch""" repo_path = os.path.join(mock_output_dir, 'repo') @@ -623,9 +636,10 @@ def test_send_pull_request_invalid_target_branch( ValueError, match='Target branch nonexistent-branch does not exist' ): send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + 
platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', target_branch='nonexistent-branch', @@ -639,7 +653,7 @@ def test_send_pull_request_invalid_target_branch( @patch('requests.post') @patch('requests.get') def test_send_pull_request_git_push_failure( - mock_get, mock_post, mock_run, mock_github_issue, mock_output_dir + mock_get, mock_post, mock_run, mock_issue, mock_output_dir, mock_llm_config ): repo_path = os.path.join(mock_output_dir, 'repo') @@ -657,9 +671,10 @@ def test_send_pull_request_git_push_failure( RuntimeError, match='Failed to push changes to the remote repository' ): send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', ) @@ -697,7 +712,7 @@ def test_send_pull_request_git_push_failure( @patch('requests.post') @patch('requests.get') def test_send_pull_request_permission_error( - mock_get, mock_post, mock_run, mock_github_issue, mock_output_dir + mock_get, mock_post, mock_run, mock_issue, mock_output_dir, mock_llm_config ): repo_path = os.path.join(mock_output_dir, 'repo') @@ -716,9 +731,10 @@ def test_send_pull_request_permission_error( RuntimeError, match='Failed to create pull request due to missing permissions.' ): send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='ready', ) @@ -729,12 +745,17 @@ def test_send_pull_request_permission_error( @patch('requests.post') -def test_reply_to_comment(mock_post): +def test_reply_to_comment(mock_post, mock_issue): # Arrange: set up the test data - github_token = 'test_token' + token = 'test_token' comment_id = 'test_comment_id' reply = 'This is a test reply.' 
+ # Create an instance of GithubIssueHandler + handler = GithubIssueHandler( + owner='test-owner', repo='test-repo', token=token, username='test-user' + ) + # Mock the response from the GraphQL API mock_response = MagicMock() mock_response.status_code = 200 @@ -753,7 +774,7 @@ def test_reply_to_comment(mock_post): mock_post.return_value = mock_response # Act: call the function - reply_to_comment(github_token, comment_id, reply) + handler.reply_to_comment(mock_issue.number, comment_id, reply) # Assert: check that the POST request was made with the correct parameters query = """ @@ -778,7 +799,7 @@ def test_reply_to_comment(mock_post): 'https://api.github.com/graphql', json={'query': query, 'variables': expected_variables}, headers={ - 'Authorization': f'Bearer {github_token}', + 'Authorization': f'Bearer {token}', 'Content-Type': 'application/json', }, ) @@ -800,12 +821,12 @@ def test_process_single_pr_update( mock_llm_config, ): # Initialize test data - github_token = 'test_token' - github_username = 'test_user' + token = 'test_token' + username = 'test_user' pr_type = 'draft' resolver_output = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=1, @@ -838,8 +859,9 @@ def test_process_single_pr_update( process_single_issue( mock_output_dir, resolver_output, - github_token, - github_username, + token, + username, + Platform.GITHUB, pr_type, mock_llm_config, None, @@ -855,9 +877,10 @@ def test_process_single_pr_update( f'{mock_output_dir}/patches/pr_1', resolver_output.issue, 'pr' ) mock_update_existing_pull_request.assert_called_once_with( - github_issue=resolver_output.issue, - github_token=github_token, - github_username=github_username, + issue=resolver_output.issue, + token=token, + username=username, + platform=Platform.GITHUB, patch_dir=f'{mock_output_dir}/patches/pr_1', additional_message='[Test success 1]', llm_config=mock_llm_config, @@ -877,12 +900,13 @@ def test_process_single_issue( mock_llm_config, ): # Initialize test data - github_token = 'test_token' - github_username = 'test_user' + token = 'test_token' + username = 'test_user' pr_type = 'draft' + platform = Platform.GITHUB resolver_output = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=1, @@ -911,8 +935,9 @@ def test_process_single_issue( process_single_issue( mock_output_dir, resolver_output, - github_token, - github_username, + token, + username, + platform, pr_type, mock_llm_config, None, @@ -929,9 +954,10 @@ def test_process_single_issue( f'{mock_output_dir}/patches/issue_1', resolver_output.issue, 'issue' ) mock_send_pull_request.assert_called_once_with( - github_issue=resolver_output.issue, - github_token=github_token, - github_username=github_username, + issue=resolver_output.issue, + token=token, + username=username, + platform=platform, patch_dir=f'{mock_output_dir}/patches/issue_1', pr_type=pr_type, fork_owner=None, @@ -955,12 +981,12 @@ def test_process_single_issue_unsuccessful( mock_llm_config, ): # Initialize test data - github_token = 'test_token' - github_username = 'test_user' + token = 'test_token' + username = 'test_user' pr_type = 'draft' resolver_output = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=1, @@ -983,8 +1009,9 @@ def test_process_single_issue_unsuccessful( process_single_issue( mock_output_dir, resolver_output, - github_token, - github_username, + token, + username, + Platform.GITHUB, pr_type, mock_llm_config, None, @@ -1006,7 +1033,7 @@ def 
test_process_all_successful_issues( ): # Create ResolverOutput objects with properly initialized GithubIssue instances resolver_output_1 = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=1, @@ -1026,7 +1053,7 @@ def test_process_all_successful_issues( ) resolver_output_2 = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=2, @@ -1046,7 +1073,7 @@ def test_process_all_successful_issues( ) resolver_output_3 = ResolverOutput( - issue=GithubIssue( + issue=Issue( owner='test-owner', repo='test-repo', number=3, @@ -1074,8 +1101,9 @@ def test_process_all_successful_issues( # Call the function process_all_successful_issues( 'output_dir', - 'github_token', - 'github_username', + 'token', + 'username', + Platform.GITHUB, 'draft', mock_llm_config, # llm_config None, # fork_owner @@ -1090,8 +1118,9 @@ def test_process_all_successful_issues( call( 'output_dir', resolver_output_1, - 'github_token', - 'github_username', + 'token', + 'username', + Platform.GITHUB, 'draft', mock_llm_config, None, @@ -1101,8 +1130,9 @@ def test_process_all_successful_issues( call( 'output_dir', resolver_output_3, - 'github_token', - 'github_username', + 'token', + 'username', + Platform.GITHUB, 'draft', mock_llm_config, None, @@ -1118,7 +1148,7 @@ def test_process_all_successful_issues( @patch('requests.get') @patch('subprocess.run') def test_send_pull_request_branch_naming( - mock_run, mock_get, mock_github_issue, mock_output_dir + mock_run, mock_get, mock_issue, mock_output_dir, mock_llm_config ): repo_path = os.path.join(mock_output_dir, 'repo') @@ -1138,9 +1168,10 @@ def test_send_pull_request_branch_naming( # Call the function result = send_pull_request( - github_issue=mock_github_issue, - github_token='test-token', - github_username='test-user', + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITHUB, patch_dir=repo_path, pr_type='branch', ) @@ -1181,11 +1212,13 @@ def test_send_pull_request_branch_naming( @patch('openhands.resolver.send_pull_request.process_all_successful_issues') @patch('openhands.resolver.send_pull_request.process_single_issue') @patch('openhands.resolver.send_pull_request.load_single_resolver_output') +@patch('openhands.resolver.send_pull_request.identify_token') @patch('os.path.exists') @patch('os.getenv') def test_main( mock_getenv, mock_path_exists, + mock_identify_token, mock_load_single_resolver_output, mock_process_single_issue, mock_process_all_successful_issues, @@ -1195,8 +1228,8 @@ def test_main( # Setup mock parser mock_args = MagicMock() - mock_args.github_token = None - mock_args.github_username = 'mock_username' + mock_args.token = None + mock_args.username = 'mock_username' mock_args.output_dir = '/mock/output' mock_args.pr_type = 'draft' mock_args.issue_number = '42' @@ -1222,9 +1255,13 @@ def test_main( mock_resolver_output = MagicMock() mock_load_single_resolver_output.return_value = mock_resolver_output + mock_identify_token.return_value = Platform.GITHUB + # Run main function main() + mock_identify_token.assert_called_with('mock_token') + llm_config = LLMConfig( model=mock_args.llm_model, base_url=mock_args.llm_base_url, @@ -1237,6 +1274,7 @@ def test_main( mock_resolver_output, 'mock_token', 'mock_username', + Platform.GITHUB, 'draft', llm_config, None, @@ -1259,6 +1297,7 @@ def test_main( '/mock/output', 'mock_token', 'mock_username', + Platform.GITHUB, 'draft', llm_config, None, @@ -1269,12 +1308,17 @@ def test_main( with 
pytest.raises(ValueError): main() + # Test for invalid token + mock_identify_token.return_value = Platform.INVALID + with pytest.raises(ValueError, match='Token is invalid.'): + main() + @patch('subprocess.run') def test_make_commit_escapes_issue_title(mock_subprocess_run): # Setup repo_dir = '/path/to/repo' - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=42, @@ -1314,7 +1358,7 @@ def test_make_commit_escapes_issue_title(mock_subprocess_run): def test_make_commit_no_changes(mock_subprocess_run): # Setup repo_dir = '/path/to/repo' - issue = GithubIssue( + issue = Issue( owner='test-owner', repo='test-repo', number=42, diff --git a/tests/unit/resolver/gitlab/test_gitlab_guess_success.py b/tests/unit/resolver/gitlab/test_gitlab_guess_success.py new file mode 100644 index 000000000000..9c4991c0913d --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_guess_success.py @@ -0,0 +1,202 @@ +import json +from unittest.mock import MagicMock, patch + +from openhands.core.config import LLMConfig +from openhands.events.action.message import MessageAction +from openhands.llm import LLM +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) + + +def test_guess_success_multiline_explanation(): + # Mock data + issue = Issue( + owner='test', + repo='test', + number=1, + title='Test Issue', + body='Test body', + thread_comments=None, + review_comments=None, + ) + history = [MessageAction(content='Test message')] + llm_config = LLMConfig(model='test', api_key='test') + + # Create a mock response with multi-line explanation + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The PR successfully addressed the issue by: +- Fixed bug A +- Added test B +- Updated documentation C + +Automatic fix generated by OpenHands 🙌""" + ) + ) + ] + + # Use patch to mock the LLM completion call + with patch.object(LLM, 'completion', return_value=mock_response) as mock_completion: + # Create a handler instance + handler = ServiceContextIssue( + GitlabIssueHandler('test', 'test', 'test'), llm_config + ) + + # Call guess_success + success, _, explanation = handler.guess_success(issue, history) + + # Verify the results + assert success is True + assert 'The PR successfully addressed the issue by:' in explanation + assert 'Fixed bug A' in explanation + assert 'Added test B' in explanation + assert 'Updated documentation C' in explanation + assert 'Automatic fix generated by OpenHands' in explanation + + # Verify that LLM completion was called exactly once + mock_completion.assert_called_once() + + +def test_pr_handler_guess_success_with_thread_comments(): + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR(GitlabPRHandler('test', 'test', 'test'), llm_config) + + # Create a mock issue with thread comments but no review comments + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=['First comment', 'Second comment'], + closing_issues=['Issue description'], + review_comments=None, + thread_ids=None, + head_branch='test-branch', + ) + + # Create mock history + history = [MessageAction(content='Fixed the issue by implementing X and Y')] + + # Create mock LLM config + llm_config 
= LLMConfig(model='test-model', api_key='test-key') + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes successfully address the feedback.""" + ) + ) + ] + + # Test the guess_success method + with patch.object(LLM, 'completion', return_value=mock_response): + success, success_list, explanation = handler.guess_success(issue, history) + + # Verify the results + assert success is True + assert success_list == [True] + assert 'successfully address' in explanation + assert len(json.loads(explanation)) == 1 + + +def test_pr_handler_guess_success_only_review_comments(): + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create a mock issue with only review comments + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=None, + closing_issues=['Issue description'], + review_comments=['Please fix the formatting', 'Add more tests'], + thread_ids=None, + head_branch='test-branch', + ) + + # Create mock history + history = [MessageAction(content='Fixed the formatting and added more tests')] + + # Create mock LLM config + llm_config = LLMConfig(model='test-model', api_key='test-key') + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes successfully address the review comments.""" + ) + ) + ] + + # Test the guess_success method + with patch.object(LLM, 'completion', return_value=mock_response): + success, success_list, explanation = handler.guess_success(issue, history) + + # Verify the results + assert success is True + assert success_list == [True] + assert ( + '["The changes successfully address the review comments."]' in explanation + ) + + +def test_pr_handler_guess_success_no_comments(): + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR(GitlabPRHandler('test', 'test', 'test'), llm_config) + + # Create a mock issue with no comments + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=None, + closing_issues=['Issue description'], + review_comments=None, + thread_ids=None, + head_branch='test-branch', + ) + + # Create mock history + history = [MessageAction(content='Fixed the issue')] + + # Create mock LLM config + llm_config = LLMConfig(model='test-model', api_key='test-key') + + # Test that it returns appropriate message when no comments are present + success, success_list, explanation = handler.guess_success(issue, history) + assert success is False + assert success_list is None + assert explanation == 'No feedback was found to process' diff --git a/tests/unit/resolver/gitlab/test_gitlab_issue_handler.py b/tests/unit/resolver/gitlab/test_gitlab_issue_handler.py new file mode 100644 index 000000000000..6b5a5c6de609 --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_issue_handler.py @@ -0,0 +1,683 @@ +from unittest.mock import MagicMock, patch + +from openhands.core.config import LLMConfig +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler +from openhands.resolver.interfaces.issue import ReviewThread +from 
openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) + + +def test_get_converted_issues_initializes_review_comments(): + # Mock the necessary dependencies + with patch('requests.get') as mock_get: + # Mock the response for issues + mock_issues_response = MagicMock() + mock_issues_response.json.return_value = [ + {'iid': 1, 'title': 'Test Issue', 'description': 'Test Body'} + ] + # Mock the response for comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [] + + # Set up the mock to return different responses for different calls + # First call is for issues, second call is for comments + mock_get.side_effect = [ + mock_issues_response, + mock_comments_response, + mock_comments_response, + ] # Need two comment responses because we make two API calls + + # Create an instance of IssueHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextIssue( + GitlabIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + issues = handler.get_converted_issues(issue_numbers=[1]) + + # Verify that we got exactly one issue + assert len(issues) == 1 + + # Verify that review_comments is initialized as None + assert issues[0].review_comments is None + + # Verify other fields are set correctly + assert issues[0].number == 1 + assert issues[0].title == 'Test Issue' + assert issues[0].body == 'Test Body' + assert issues[0].owner == 'test-owner' + assert issues[0].repo == 'test-repo' + + +def test_get_converted_issues_handles_empty_body(): + # Mock the necessary dependencies + with patch('requests.get') as mock_get: + # Mock the response for issues + mock_issues_response = MagicMock() + mock_issues_response.json.return_value = [ + {'iid': 1, 'title': 'Test Issue', 'description': None} + ] + # Mock the response for comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [] + # Set up the mock to return different responses + mock_get.side_effect = [ + mock_issues_response, + mock_comments_response, + mock_comments_response, + ] + + # Create an instance of IssueHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextIssue( + GitlabIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + issues = handler.get_converted_issues(issue_numbers=[1]) + + # Verify that we got exactly one issue + assert len(issues) == 1 + + # Verify that body is empty string when None + assert issues[0].body == '' + + # Verify other fields are set correctly + assert issues[0].number == 1 + assert issues[0].title == 'Test Issue' + assert issues[0].owner == 'test-owner' + assert issues[0].repo == 'test-repo' + + # Verify that review_comments is initialized as None + assert issues[0].review_comments is None + + +def test_pr_handler_get_converted_issues_with_comments(): + # Mock the necessary dependencies + with patch('requests.get') as mock_get: + # Mock the response for PRs + mock_prs_response = MagicMock() + mock_prs_response.json.return_value = [ + { + 'iid': 1, + 'title': 'Test PR', + 'description': 'Test Body fixes #1', + 'source_branch': 'test-branch', + } + ] + + # Mock the response for PR comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + {'body': 'First comment', 'resolvable': True, 'system': False}, + {'body': 'Second comment', 'resolvable': True, 'system': False}, + ] + + # Mock the response for PR metadata (GraphQL) + 
mock_graphql_response = MagicMock() + mock_graphql_response.json.return_value = { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': {'edges': []}, + } + } + } + } + + # Set up the mock to return different responses + # We need to return empty responses for subsequent pages + mock_empty_response = MagicMock() + mock_empty_response.json.return_value = [] + + # Mock the response for fetching the external issue referenced in PR body + mock_external_issue_response = MagicMock() + mock_external_issue_response.json.return_value = { + 'description': 'This is additional context from an externally referenced issue.' + } + + mock_get.side_effect = [ + mock_prs_response, # First call for PRs + mock_empty_response, # Second call for PRs (empty page) + mock_empty_response, # Third call for related issues + mock_comments_response, # Fourth call for PR comments + mock_empty_response, # Fifth call for PR comments (empty page) + mock_external_issue_response, # Mock response for the external issue reference #1 + ] + + # Mock the post request for GraphQL + with patch('requests.post') as mock_post: + mock_post.return_value = mock_graphql_response + + # Create an instance of PRHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + prs = handler.get_converted_issues(issue_numbers=[1]) + + # Verify that we got exactly one PR + assert len(prs) == 1 + + # Verify that thread_comments are set correctly + assert prs[0].thread_comments == ['First comment', 'Second comment'] + + # Verify other fields are set correctly + assert prs[0].number == 1 + assert prs[0].title == 'Test PR' + assert prs[0].body == 'Test Body fixes #1' + assert prs[0].owner == 'test-owner' + assert prs[0].repo == 'test-repo' + assert prs[0].head_branch == 'test-branch' + assert prs[0].closing_issues == [ + 'This is additional context from an externally referenced issue.' 
+ ] + + +def test_get_issue_comments_with_specific_comment_id(): + # Mock the necessary dependencies + with patch('requests.get') as mock_get: + # Mock the response for comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + {'id': 123, 'body': 'First comment', 'resolvable': True, 'system': False}, + {'id': 456, 'body': 'Second comment', 'resolvable': True, 'system': False}, + ] + + mock_get.return_value = mock_comments_response + + # Create an instance of IssueHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextIssue( + GitlabIssueHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get comments with a specific comment_id + specific_comment = handler.get_issue_comments(issue_number=1, comment_id=123) + + # Verify only the specific comment is returned + assert specific_comment == ['First comment'] + + +def test_pr_handler_get_converted_issues_with_specific_thread_comment(): + # Define the specific comment_id to filter + specific_comment_id = 123 + + # Mock GraphQL response for review threads + with patch('requests.get') as mock_get: + # Mock the response for PRs + mock_prs_response = MagicMock() + mock_prs_response.json.return_value = [ + { + 'iid': 1, + 'title': 'Test PR', + 'description': 'Test Body', + 'source_branch': 'test-branch', + } + ] + + # Mock the response for PR comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + {'body': 'First comment', 'id': 123, 'resolvable': True, 'system': False}, + {'body': 'Second comment', 'id': 124, 'resolvable': True, 'system': False}, + ] + + # Mock the response for PR metadata (GraphQL) + mock_graphql_response = MagicMock() + mock_graphql_response.json.return_value = { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': { + 'edges': [ + { + 'node': { + 'id': 'review-thread-1', + 'resolved': False, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'id': 'GID/121', + 'body': 'Specific review comment', + 'position': { + 'filePath': 'file1.txt', + }, + }, + { + 'id': 'GID/456', + 'body': 'Another review comment', + 'position': { + 'filePath': 'file2.txt', + }, + }, + ] + }, + } + } + ] + }, + } + } + } + } + + # Set up the mock to return different responses + # We need to return empty responses for subsequent pages + mock_empty_response = MagicMock() + mock_empty_response.json.return_value = [] + + mock_get.side_effect = [ + mock_prs_response, # First call for PRs + mock_empty_response, # Second call for PRs (empty page) + mock_empty_response, # Third call for related issues + mock_comments_response, # Fourth call for PR comments + mock_empty_response, # Fifth call for PR comments (empty page) + ] + + # Mock the post request for GraphQL + with patch('requests.post') as mock_post: + mock_post.return_value = mock_graphql_response + + # Create an instance of PRHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + prs = handler.get_converted_issues( + issue_numbers=[1], comment_id=specific_comment_id + ) + + # Verify that we got exactly one PR + assert len(prs) == 1 + + # Verify that thread_comments are set correctly + assert prs[0].thread_comments == ['First comment'] + assert prs[0].review_comments is None + assert prs[0].review_threads == [] + + # Verify other fields are set correctly + assert prs[0].number == 1 + assert prs[0].title == 'Test PR' + assert 
prs[0].body == 'Test Body' + assert prs[0].owner == 'test-owner' + assert prs[0].repo == 'test-repo' + assert prs[0].head_branch == 'test-branch' + + +def test_pr_handler_get_converted_issues_with_specific_review_thread_comment(): + # Define the specific comment_id to filter + specific_comment_id = 123 + + # Mock GraphQL response for review threads + with patch('requests.get') as mock_get: + # Mock the response for PRs + mock_prs_response = MagicMock() + mock_prs_response.json.return_value = [ + { + 'iid': 1, + 'title': 'Test PR', + 'description': 'Test Body', + 'source_branch': 'test-branch', + } + ] + + # Mock the response for PR comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + { + 'description': 'First comment', + 'id': 120, + 'resolvable': True, + 'system': False, + }, + { + 'description': 'Second comment', + 'id': 124, + 'resolvable': True, + 'system': False, + }, + ] + + # Mock the response for PR metadata (GraphQL) + mock_graphql_response = MagicMock() + mock_graphql_response.json.return_value = { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': { + 'edges': [ + { + 'node': { + 'id': 'review-thread-1', + 'resolved': False, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'id': f'GID/{specific_comment_id}', + 'body': 'Specific review comment', + 'position': { + 'filePath': 'file1.txt', + }, + }, + { + 'id': 'GID/456', + 'body': 'Another review comment', + 'position': { + 'filePath': 'file1.txt', + }, + }, + ] + }, + } + } + ] + }, + } + } + } + } + + # Set up the mock to return different responses + # We need to return empty responses for subsequent pages + mock_empty_response = MagicMock() + mock_empty_response.json.return_value = [] + + mock_get.side_effect = [ + mock_prs_response, # First call for PRs + mock_empty_response, # Second call for PRs (empty page) + mock_empty_response, # Third call for related issues + mock_comments_response, # Fourth call for PR comments + mock_empty_response, # Fifth call for PR comments (empty page) + ] + + # Mock the post request for GraphQL + with patch('requests.post') as mock_post: + mock_post.return_value = mock_graphql_response + + # Create an instance of PRHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + prs = handler.get_converted_issues( + issue_numbers=[1], comment_id=specific_comment_id + ) + + # Verify that we got exactly one PR + assert len(prs) == 1 + + # Verify that thread_comments are set correctly + assert prs[0].thread_comments is None + assert prs[0].review_comments is None + assert len(prs[0].review_threads) == 1 + assert isinstance(prs[0].review_threads[0], ReviewThread) + assert ( + prs[0].review_threads[0].comment + == 'Specific review comment\n---\nlatest feedback:\nAnother review comment\n' + ) + assert prs[0].review_threads[0].files == ['file1.txt'] + + # Verify other fields are set correctly + assert prs[0].number == 1 + assert prs[0].title == 'Test PR' + assert prs[0].body == 'Test Body' + assert prs[0].owner == 'test-owner' + assert prs[0].repo == 'test-repo' + assert prs[0].head_branch == 'test-branch' + + +def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs(): + # Define the specific comment_id to filter + specific_comment_id = 123 + + # Mock GraphQL response for review threads + with patch('requests.get') as mock_get: + # Mock the response for PRs + mock_prs_response = MagicMock() 
+ mock_prs_response.json.return_value = [ + { + 'iid': 1, + 'title': 'Test PR fixes #3', + 'description': 'Test Body', + 'source_branch': 'test-branch', + } + ] + + # Mock the response for PR comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + { + 'description': 'First comment', + 'id': 120, + 'resolvable': True, + 'system': False, + }, + { + 'description': 'Second comment', + 'id': 124, + 'resolvable': True, + 'system': False, + }, + ] + + # Mock the response for PR metadata (GraphQL) + mock_graphql_response = MagicMock() + mock_graphql_response.json.return_value = { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': { + 'edges': [ + { + 'node': { + 'id': 'review-thread-1', + 'resolved': False, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'id': f'GID/{specific_comment_id}', + 'body': 'Specific review comment that references #6', + 'position': { + 'filePath': 'file1.txt', + }, + }, + { + 'id': 'GID/456', + 'body': 'Another review comment referencing #7', + 'position': { + 'filePath': 'file2.txt', + }, + }, + ] + }, + } + } + ] + }, + } + } + } + } + + # Set up the mock to return different responses + # We need to return empty responses for subsequent pages + mock_empty_response = MagicMock() + mock_empty_response.json.return_value = [] + + # Mock the response for fetching the external issue referenced in PR body + mock_external_issue_response_in_body = MagicMock() + mock_external_issue_response_in_body.json.return_value = { + 'description': 'External context #1.' + } + + # Mock the response for fetching the external issue referenced in review thread + mock_external_issue_response_review_thread = MagicMock() + mock_external_issue_response_review_thread.json.return_value = { + 'description': 'External context #2.' 
+ } + + mock_get.side_effect = [ + mock_prs_response, # First call for PRs + mock_empty_response, # Second call for PRs (empty page) + mock_empty_response, # Third call for related issues + mock_comments_response, # Fourth call for PR comments + mock_empty_response, # Fifth call for PR comments (empty page) + mock_external_issue_response_in_body, + mock_external_issue_response_review_thread, + ] + + # Mock the post request for GraphQL + with patch('requests.post') as mock_post: + mock_post.return_value = mock_graphql_response + + # Create an instance of PRHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + prs = handler.get_converted_issues( + issue_numbers=[1], comment_id=specific_comment_id + ) + + # Verify that we got exactly one PR + assert len(prs) == 1 + + # Verify that thread_comments are set correctly + assert prs[0].thread_comments is None + assert prs[0].review_comments is None + assert len(prs[0].review_threads) == 1 + assert isinstance(prs[0].review_threads[0], ReviewThread) + assert ( + prs[0].review_threads[0].comment + == 'Specific review comment that references #6\n---\nlatest feedback:\nAnother review comment referencing #7\n' + ) + assert prs[0].closing_issues == [ + 'External context #1.', + 'External context #2.', + ] # Only includes references inside comment ID and body PR + + # Verify other fields are set correctly + assert prs[0].number == 1 + assert prs[0].title == 'Test PR fixes #3' + assert prs[0].body == 'Test Body' + assert prs[0].owner == 'test-owner' + assert prs[0].repo == 'test-repo' + assert prs[0].head_branch == 'test-branch' + + +def test_pr_handler_get_converted_issues_with_duplicate_issue_refs(): + # Mock the necessary dependencies + with patch('requests.get') as mock_get: + # Mock the response for PRs + mock_prs_response = MagicMock() + mock_prs_response.json.return_value = [ + { + 'iid': 1, + 'title': 'Test PR', + 'description': 'Test Body fixes #1', + 'source_branch': 'test-branch', + } + ] + + # Mock the response for PR comments + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + { + 'body': 'First comment addressing #1', + 'resolvable': True, + 'system': False, + }, + { + 'body': 'Second comment addressing #2', + 'resolvable': True, + 'system': False, + }, + ] + + # Mock the response for PR metadata (GraphQL) + mock_graphql_response = MagicMock() + mock_graphql_response.json.return_value = { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': {'edges': []}, + } + } + } + } + + # Set up the mock to return different responses + # We need to return empty responses for subsequent pages + mock_empty_response = MagicMock() + mock_empty_response.json.return_value = [] + + # Mock the response for fetching the external issue referenced in PR body + mock_external_issue_response_in_body = MagicMock() + mock_external_issue_response_in_body.json.return_value = { + 'description': 'External context #1.' + } + + # Mock the response for fetching the external issue referenced in review thread + mock_external_issue_response_in_comment = MagicMock() + mock_external_issue_response_in_comment.json.return_value = { + 'description': 'External context #2.' 
+ } + + mock_get.side_effect = [ + mock_prs_response, # First call for PRs + mock_empty_response, # Second call for PRs (empty page) + mock_empty_response, # Third call for related issues + mock_comments_response, # Fourth call for PR comments + mock_empty_response, # Fifth call for PR comments (empty page) + mock_external_issue_response_in_body, # Mock response for the external issue reference #1 + mock_external_issue_response_in_comment, + ] + + # Mock the post request for GraphQL + with patch('requests.post') as mock_post: + mock_post.return_value = mock_graphql_response + + # Create an instance of PRHandler + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Get converted issues + prs = handler.get_converted_issues(issue_numbers=[1]) + + # Verify that we got exactly one PR + assert len(prs) == 1 + + # Verify that thread_comments are set correctly + assert prs[0].thread_comments == [ + 'First comment addressing #1', + 'Second comment addressing #2', + ] + + # Verify other fields are set correctly + assert prs[0].number == 1 + assert prs[0].title == 'Test PR' + assert prs[0].body == 'Test Body fixes #1' + assert prs[0].owner == 'test-owner' + assert prs[0].repo == 'test-repo' + assert prs[0].head_branch == 'test-branch' + assert prs[0].closing_issues == [ + 'External context #1.', + 'External context #2.', + ] diff --git a/tests/unit/resolver/gitlab/test_gitlab_issue_handler_error_handling.py b/tests/unit/resolver/gitlab/test_gitlab_issue_handler_error_handling.py new file mode 100644 index 000000000000..66978ebc8984 --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_issue_handler_error_handling.py @@ -0,0 +1,283 @@ +from unittest.mock import MagicMock, patch + +import pytest +import requests +from litellm.exceptions import RateLimitError + +from openhands.core.config import LLMConfig +from openhands.events.action.message import MessageAction +from openhands.llm.llm import LLM +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) + + +@pytest.fixture(autouse=True) +def mock_logger(monkeypatch): + # suppress logging of completion data to file + mock_logger = MagicMock() + monkeypatch.setattr('openhands.llm.debug_mixin.llm_prompt_logger', mock_logger) + monkeypatch.setattr('openhands.llm.debug_mixin.llm_response_logger', mock_logger) + return mock_logger + + +@pytest.fixture +def default_config(): + return LLMConfig( + model='gpt-4o', + api_key='test_key', + num_retries=2, + retry_min_wait=1, + retry_max_wait=2, + ) + + +def test_handle_nonexistent_issue_reference(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Mock the requests.get to simulate a 404 error + mock_response = MagicMock() + mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError( + '404 Client Error: Not Found' + ) + + with patch('requests.get', return_value=mock_response): + # Call the method with a non-existent issue reference + result = handler._strategy.get_context_from_external_issues_references( + closing_issues=[], + closing_issue_numbers=[], + issue_body='This references #999999', # Non-existent issue + review_comments=[], + review_threads=[], + thread_comments=None, + 
) + + # The method should return an empty list since the referenced issue couldn't be fetched + assert result == [] + + +def test_handle_rate_limit_error(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Mock the requests.get to simulate a rate limit error + mock_response = MagicMock() + mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError( + '403 Client Error: Rate Limit Exceeded' + ) + + with patch('requests.get', return_value=mock_response): + # Call the method with an issue reference + result = handler._strategy.get_context_from_external_issues_references( + closing_issues=[], + closing_issue_numbers=[], + issue_body='This references #123', + review_comments=[], + review_threads=[], + thread_comments=None, + ) + + # The method should return an empty list since the request was rate limited + assert result == [] + + +def test_handle_network_error(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Mock the requests.get to simulate a network error + with patch( + 'requests.get', side_effect=requests.exceptions.ConnectionError('Network Error') + ): + # Call the method with an issue reference + result = handler._strategy.get_context_from_external_issues_references( + closing_issues=[], + closing_issue_numbers=[], + issue_body='This references #123', + review_comments=[], + review_threads=[], + thread_comments=None, + ) + + # The method should return an empty list since the network request failed + assert result == [] + + +def test_successful_issue_reference(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Mock a successful response + mock_response = MagicMock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = { + 'description': 'This is the referenced issue body' + } + + with patch('requests.get', return_value=mock_response): + # Call the method with an issue reference + result = handler._strategy.get_context_from_external_issues_references( + closing_issues=[], + closing_issue_numbers=[], + issue_body='This references #123', + review_comments=[], + review_threads=[], + thread_comments=None, + ) + + # The method should return a list with the referenced issue body + assert result == ['This is the referenced issue body'] + + +class MockLLMResponse: + """Mock LLM Response class to mimic the actual LLM response structure.""" + + class Choice: + class Message: + def __init__(self, content): + self.content = content + + def __init__(self, content): + self.message = self.Message(content) + + def __init__(self, content): + self.choices = [self.Choice(content)] + + +class DotDict(dict): + """ + A dictionary that supports dot notation access. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + for key, value in self.items(): + if isinstance(value, dict): + self[key] = DotDict(value) + elif isinstance(value, list): + self[key] = [ + DotDict(item) if isinstance(item, dict) else item for item in value + ] + + def __getattr__(self, key): + if key in self: + return self[key] + else: + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{key}'" + ) + + def __setattr__(self, key, value): + self[key] = value + + def __delattr__(self, key): + if key in self: + del self[key] + else: + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{key}'" + ) + + +@patch('openhands.llm.llm.litellm_completion') +def test_guess_success_rate_limit_wait_time(mock_litellm_completion, default_config): + """Test that the retry mechanism in guess_success respects wait time between retries.""" + + with patch('time.sleep') as mock_sleep: + # Simulate a rate limit error followed by a successful response + mock_litellm_completion.side_effect = [ + RateLimitError( + 'Rate limit exceeded', llm_provider='test_provider', model='test_model' + ), + DotDict( + { + 'choices': [ + { + 'message': { + 'content': '--- success\ntrue\n--- explanation\nRetry successful' + } + } + ] + } + ), + ] + + llm = LLM(config=default_config) + handler = ServiceContextIssue( + GitlabIssueHandler('test-owner', 'test-repo', 'test-token'), default_config + ) + handler.llm = llm + + # Mock issue and history + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test Issue', + body='This is a test issue.', + thread_comments=['Please improve error handling'], + ) + history = [MessageAction(content='Fixed error handling.')] + + # Call guess_success + success, _, explanation = handler.guess_success(issue, history) + + # Assertions + assert success is True + assert explanation == 'Retry successful' + assert mock_litellm_completion.call_count == 2 # Two attempts made + mock_sleep.assert_called_once() # Sleep called once between retries + + # Validate wait time + wait_time = mock_sleep.call_args[0][0] + assert ( + default_config.retry_min_wait <= wait_time <= default_config.retry_max_wait + ), f'Expected wait time between {default_config.retry_min_wait} and {default_config.retry_max_wait} seconds, but got {wait_time}' + + +@patch('openhands.llm.llm.litellm_completion') +def test_guess_success_exhausts_retries(mock_completion, default_config): + """Test the retry mechanism in guess_success exhausts retries and raises an error.""" + # Simulate persistent rate limit errors by always raising RateLimitError + mock_completion.side_effect = RateLimitError( + 'Rate limit exceeded', llm_provider='test_provider', model='test_model' + ) + + # Initialize LLM and handler + llm = LLM(config=default_config) + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), default_config + ) + handler.llm = llm + + # Mock issue and history + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test Issue', + body='This is a test issue.', + thread_comments=['Please improve error handling'], + ) + history = [MessageAction(content='Fixed error handling.')] + + # Call guess_success and expect it to raise an error after retries + with pytest.raises(RateLimitError): + handler.guess_success(issue, history) + + # Assertions + assert ( + mock_completion.call_count == default_config.num_retries + ) # Initial call + retries diff --git 
a/tests/unit/resolver/gitlab/test_gitlab_pr_handler_guess_success.py b/tests/unit/resolver/gitlab/test_gitlab_pr_handler_guess_success.py new file mode 100644 index 000000000000..a5596d7d76df --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_pr_handler_guess_success.py @@ -0,0 +1,672 @@ +import json +from unittest.mock import MagicMock, patch + +import pytest + +from openhands.core.config import LLMConfig +from openhands.events.action.message import MessageAction +from openhands.llm.llm import LLM +from openhands.resolver.interfaces.gitlab import GitlabPRHandler +from openhands.resolver.interfaces.issue import Issue, ReviewThread +from openhands.resolver.interfaces.issue_definitions import ServiceContextPR + + +@pytest.fixture +def pr_handler(): + llm_config = LLMConfig(model='test-model') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + return handler + + +@pytest.fixture +def mock_llm_success_response(): + return MagicMock( + choices=[ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes look good""" + ) + ) + ] + ) + + +def test_guess_success_review_threads_litellm_call(): + """Test that the completion() call for review threads contains the expected content.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create a mock issue with review threads + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=None, + closing_issues=['Issue 1 description', 'Issue 2 description'], + review_comments=None, + review_threads=[ + ReviewThread( + comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings', + files=['/src/file1.py', '/src/file2.py'], + ), + ReviewThread( + comment='Add more tests\n---\nlatest feedback:\nAdd test cases', + files=['/tests/test_file.py'], + ), + ], + thread_ids=['1', '2'], + head_branch='test-branch', + ) + + # Create mock history with a detailed response + history = [ + MessageAction( + content="""I have made the following changes: +1. Fixed formatting in file1.py and file2.py +2. Added docstrings to all functions +3. 
Added test cases in test_file.py""" + ) + ] + + # Create mock LLM config + llm_config = LLMConfig(model='test-model', api_key='test-key') + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes successfully address the feedback.""" + ) + ) + ] + + # Test the guess_success method + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, success_list, explanation = handler.guess_success(issue, history) + + # Verify the completion() calls + assert mock_completion.call_count == 2 # One call per review thread + + # Check first call + first_call = mock_completion.call_args_list[0] + first_prompt = first_call[1]['messages'][0]['content'] + assert ( + 'Issue descriptions:\n' + + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4) + in first_prompt + ) + assert ( + 'Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings' + in first_prompt + ) + assert ( + 'Files locations:\n' + + json.dumps(['/src/file1.py', '/src/file2.py'], indent=4) + in first_prompt + ) + assert 'Last message from AI agent:\n' + history[0].content in first_prompt + + # Check second call + second_call = mock_completion.call_args_list[1] + second_prompt = second_call[1]['messages'][0]['content'] + assert ( + 'Issue descriptions:\n' + + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4) + in second_prompt + ) + assert ( + 'Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases' + in second_prompt + ) + assert ( + 'Files locations:\n' + json.dumps(['/tests/test_file.py'], indent=4) + in second_prompt + ) + assert 'Last message from AI agent:\n' + history[0].content in second_prompt + + assert len(json.loads(explanation)) == 2 + + +def test_guess_success_thread_comments_litellm_call(): + """Test that the completion() call for thread comments contains the expected content.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create a mock issue with thread comments + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=[ + 'Please improve error handling', + 'Add input validation', + 'latest feedback:\nHandle edge cases', + ], + closing_issues=['Issue 1 description', 'Issue 2 description'], + review_comments=None, + thread_ids=None, + head_branch='test-branch', + ) + + # Create mock history with a detailed response + history = [ + MessageAction( + content="""I have made the following changes: +1. Added try/catch blocks for error handling +2. Added input validation checks +3. 
Added handling for edge cases""" + ) + ] + + # Create mock LLM config + llm_config = LLMConfig(model='test-model', api_key='test-key') + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes successfully address the feedback.""" + ) + ) + ] + + # Test the guess_success method + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, success_list, explanation = handler.guess_success(issue, history) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert ( + 'Issue descriptions:\n' + + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4) + in prompt + ) + assert 'PR Thread Comments:\n' + '\n---\n'.join(issue.thread_comments) in prompt + assert 'Last message from AI agent:\n' + history[0].content in prompt + + assert len(json.loads(explanation)) == 1 + + +def test_check_feedback_with_llm(): + """Test the _check_feedback_with_llm helper function.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Test cases for different LLM responses + test_cases = [ + { + 'response': '--- success\ntrue\n--- explanation\nChanges look good', + 'expected': (True, 'Changes look good'), + }, + { + 'response': '--- success\nfalse\n--- explanation\nNot all issues fixed', + 'expected': (False, 'Not all issues fixed'), + }, + { + 'response': 'Invalid response format', + 'expected': ( + False, + 'Failed to decode answer from LLM response: Invalid response format', + ), + }, + { + 'response': '--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere', + 'expected': (True, 'Multiline\nexplanation\nhere'), + }, + ] + + for case in test_cases: + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [MagicMock(message=MagicMock(content=case['response']))] + + # Test the function + with patch.object(LLM, 'completion', return_value=mock_response): + success, explanation = handler._check_feedback_with_llm('test prompt') + assert (success, explanation) == case['expected'] + + +def test_check_review_thread_with_git_patch(): + """Test that git patch from complete_runtime is included in the prompt.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + review_thread = ReviewThread( + comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings', + files=['/src/file1.py', '/src/file2.py'], + ) + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have fixed the formatting and added docstrings' + git_patch = 'diff --git a/src/file1.py b/src/file1.py\n+"""Added docstring."""\n' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_review_thread( + review_thread, 
issues_context, last_message, git_patch + ) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'Feedback:\n' + review_thread.comment in prompt + assert ( + 'Files locations:\n' + json.dumps(review_thread.files, indent=4) in prompt + ) + assert 'Last message from AI agent:\n' + last_message in prompt + assert 'Changes made (git patch):\n' + git_patch in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_check_review_thread(): + """Test the _check_review_thread helper function.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + review_thread = ReviewThread( + comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings', + files=['/src/file1.py', '/src/file2.py'], + ) + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have fixed the formatting and added docstrings' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_review_thread( + review_thread, issues_context, last_message + ) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'Feedback:\n' + review_thread.comment in prompt + assert ( + 'Files locations:\n' + json.dumps(review_thread.files, indent=4) in prompt + ) + assert 'Last message from AI agent:\n' + last_message in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_check_thread_comments_with_git_patch(): + """Test that git patch from complete_runtime is included in the prompt.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + thread_comments = [ + 'Please improve error handling', + 'Add input validation', + 'latest feedback:\nHandle edge cases', + ] + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have added error handling and input validation' + git_patch = 'diff --git a/src/file1.py b/src/file1.py\n+try:\n+ validate_input()\n+except ValueError:\n+ handle_error()\n' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_thread_comments( + thread_comments, issues_context, last_message, git_patch + ) + + # Verify the completion() call + 
mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'PR Thread Comments:\n' + '\n---\n'.join(thread_comments) in prompt + assert 'Last message from AI agent:\n' + last_message in prompt + assert 'Changes made (git patch):\n' + git_patch in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_check_thread_comments(): + """Test the _check_thread_comments helper function.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + thread_comments = [ + 'Please improve error handling', + 'Add input validation', + 'latest feedback:\nHandle edge cases', + ] + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have added error handling and input validation' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_thread_comments( + thread_comments, issues_context, last_message + ) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'PR Thread Comments:\n' + '\n---\n'.join(thread_comments) in prompt + assert 'Last message from AI agent:\n' + last_message in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_check_review_comments_with_git_patch(): + """Test that git patch from complete_runtime is included in the prompt.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + review_comments = [ + 'Please fix the code style', + 'Add more test cases', + 'latest feedback:\nImprove documentation', + ] + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have fixed the code style and added tests' + git_patch = 'diff --git a/src/file1.py b/src/file1.py\n+"""This module does X."""\n+def func():\n+ """Do Y."""\n' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_review_comments( + review_comments, issues_context, last_message, git_patch + ) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'PR Review Comments:\n' + '\n---\n'.join(review_comments) in 
prompt + assert 'Last message from AI agent:\n' + last_message in prompt + assert 'Changes made (git patch):\n' + git_patch in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_check_review_comments(): + """Test the _check_review_comments helper function.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create test data + review_comments = [ + 'Please improve code readability', + 'Add comments to complex functions', + 'Follow PEP 8 style guide', + ] + issues_context = json.dumps( + ['Issue 1 description', 'Issue 2 description'], indent=4 + ) + last_message = 'I have improved code readability and added comments' + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +Changes look good""" + ) + ) + ] + + # Test the function + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, explanation = handler._check_review_comments( + review_comments, issues_context, last_message + ) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert 'Issue descriptions:\n' + issues_context in prompt + assert 'PR Review Comments:\n' + '\n---\n'.join(review_comments) in prompt + assert 'Last message from AI agent:\n' + last_message in prompt + + # Check result + assert success is True + assert explanation == 'Changes look good' + + +def test_guess_success_review_comments_litellm_call(): + """Test that the completion() call for review comments contains the expected content.""" + # Create a PR handler instance + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR( + GitlabPRHandler('test-owner', 'test-repo', 'test-token'), llm_config + ) + + # Create a mock issue with review comments + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='Test Body', + thread_comments=None, + closing_issues=['Issue 1 description', 'Issue 2 description'], + review_comments=[ + 'Please improve code readability', + 'Add comments to complex functions', + 'Follow PEP 8 style guide', + ], + thread_ids=None, + head_branch='test-branch', + ) + + # Create mock history with a detailed response + history = [ + MessageAction( + content="""I have made the following changes: +1. Improved code readability by breaking down complex functions +2. Added detailed comments to all complex functions +3. 
Fixed code style to follow PEP 8""" + ) + ] + + # Mock the LLM response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock( + message=MagicMock( + content="""--- success +true + +--- explanation +The changes successfully address the feedback.""" + ) + ) + ] + + with patch.object(LLM, 'completion') as mock_completion: + mock_completion.return_value = mock_response + success, success_list, explanation = handler.guess_success(issue, history) + + # Verify the completion() call + mock_completion.assert_called_once() + call_args = mock_completion.call_args + prompt = call_args[1]['messages'][0]['content'] + + # Check prompt content + assert ( + 'Issue descriptions:\n' + + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4) + in prompt + ) + assert 'PR Review Comments:\n' + '\n---\n'.join(issue.review_comments) in prompt + assert 'Last message from AI agent:\n' + history[0].content in prompt + + assert len(json.loads(explanation)) == 1 diff --git a/tests/unit/resolver/gitlab/test_gitlab_pr_title_escaping.py b/tests/unit/resolver/gitlab/test_gitlab_pr_title_escaping.py new file mode 100644 index 000000000000..54709d2f3620 --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_pr_title_escaping.py @@ -0,0 +1,167 @@ +import os +import subprocess +import tempfile + +from openhands.core.logger import openhands_logger as logger +from openhands.resolver.interfaces.issue import Issue +from openhands.resolver.send_pull_request import make_commit +from openhands.resolver.utils import Platform + + +def test_commit_message_with_quotes(): + # Create a temporary directory and initialize git repo + with tempfile.TemporaryDirectory() as temp_dir: + subprocess.run(['git', 'init', temp_dir], check=True) + + # Create a test file and add it to git + test_file = os.path.join(temp_dir, 'test.txt') + with open(test_file, 'w') as f: + f.write('test content') + + subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True) + + # Create a test issue with problematic title + issue = Issue( + owner='test-owner', + repo='test-repo', + number=123, + title="Issue with 'quotes' and \"double quotes\" and ", + body='Test body', + labels=[], + assignees=[], + state='open', + created_at='2024-01-01T00:00:00Z', + updated_at='2024-01-01T00:00:00Z', + closed_at=None, + head_branch=None, + thread_ids=None, + ) + + # Make the commit + make_commit(temp_dir, issue, 'issue') + + # Get the commit message + result = subprocess.run( + ['git', '-C', temp_dir, 'log', '-1', '--pretty=%B'], + capture_output=True, + text=True, + check=True, + ) + commit_msg = result.stdout.strip() + + # The commit message should contain the quotes without excessive escaping + expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and " + assert commit_msg == expected, f'Expected: {expected}\nGot: {commit_msg}' + + +def test_pr_title_with_quotes(monkeypatch): + # Mock requests.post to avoid actual API calls + class MockResponse: + def __init__(self, status_code=201): + self.status_code = status_code + self.text = '' + + def json(self): + return {'html_url': 'https://github.com/test/test/pull/1'} + + def raise_for_status(self): + pass + + def mock_post(*args, **kwargs): + # Verify that the PR title is not over-escaped + data = kwargs.get('json', {}) + title = data.get('title', '') + expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and " + assert ( + title == expected + ), f'PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}' + return MockResponse() + + class 
MockGetResponse: + def __init__(self, status_code=200): + self.status_code = status_code + self.text = '' + + def json(self): + return {'default_branch': 'main'} + + def raise_for_status(self): + pass + + monkeypatch.setattr('requests.post', mock_post) + monkeypatch.setattr('requests.get', lambda *args, **kwargs: MockGetResponse()) + monkeypatch.setattr( + 'openhands.resolver.interfaces.github.GithubIssueHandler.branch_exists', + lambda *args, **kwargs: False, + ) + + # Mock subprocess.run to avoid actual git commands + original_run = subprocess.run + + def mock_run(*args, **kwargs): + logger.info(f"Running command: {args[0] if args else kwargs.get('args', [])}") + if isinstance(args[0], list) and args[0][0] == 'git': + if 'push' in args[0]: + return subprocess.CompletedProcess( + args[0], returncode=0, stdout='', stderr='' + ) + return original_run(*args, **kwargs) + return original_run(*args, **kwargs) + + monkeypatch.setattr('subprocess.run', mock_run) + + # Create a temporary directory and initialize git repo + with tempfile.TemporaryDirectory() as temp_dir: + logger.info('Initializing git repo...') + subprocess.run(['git', 'init', temp_dir], check=True) + + # Add these lines to configure git + subprocess.run( + ['git', '-C', temp_dir, 'config', 'user.name', 'Test User'], check=True + ) + subprocess.run( + ['git', '-C', temp_dir, 'config', 'user.email', 'test@example.com'], + check=True, + ) + + # Create a test file and add it to git + test_file = os.path.join(temp_dir, 'test.txt') + with open(test_file, 'w') as f: + f.write('test content') + + logger.info('Adding and committing test file...') + subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True) + subprocess.run( + ['git', '-C', temp_dir, 'commit', '-m', 'Initial commit'], check=True + ) + + # Create a test issue with problematic title + logger.info('Creating test issue...') + issue = Issue( + owner='test-owner', + repo='test-repo', + number=123, + title="Issue with 'quotes' and \"double quotes\" and ", + body='Test body', + labels=[], + assignees=[], + state='open', + created_at='2024-01-01T00:00:00Z', + updated_at='2024-01-01T00:00:00Z', + closed_at=None, + head_branch=None, + thread_ids=None, + ) + + # Try to send a PR - this will fail if the title is incorrectly escaped + logger.info('Sending PR...') + from openhands.resolver.send_pull_request import send_pull_request + + send_pull_request( + issue=issue, + token='dummy-token', + username='test-user', + platform=Platform.GITHUB, + patch_dir=temp_dir, + pr_type='ready', + ) diff --git a/tests/unit/resolver/gitlab/test_gitlab_resolve_issues.py b/tests/unit/resolver/gitlab/test_gitlab_resolve_issues.py new file mode 100644 index 000000000000..a2dbd336cbe8 --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_resolve_issues.py @@ -0,0 +1,923 @@ +import os +import tempfile +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from openhands.core.config import LLMConfig +from openhands.events.action import CmdRunAction +from openhands.events.observation import ( + CmdOutputMetadata, + CmdOutputObservation, + NullObservation, +) +from openhands.llm.llm import LLM +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler +from openhands.resolver.interfaces.issue import Issue, ReviewThread +from openhands.resolver.interfaces.issue_definitions import ( + ServiceContextIssue, + ServiceContextPR, +) +from openhands.resolver.resolve_issue import ( + complete_runtime, + initialize_runtime, + process_issue, +) +from 
openhands.resolver.resolver_output import ResolverOutput +from openhands.resolver.utils import Platform + + +@pytest.fixture +def mock_output_dir(): + with tempfile.TemporaryDirectory() as temp_dir: + repo_path = os.path.join(temp_dir, 'repo') + # Initialize a Gitlab repo in "repo" and add a commit with "README.md" + os.makedirs(repo_path) + os.system(f'git init {repo_path}') + readme_path = os.path.join(repo_path, 'README.md') + with open(readme_path, 'w') as f: + f.write('hello world') + os.system(f'git -C {repo_path} add README.md') + os.system(f"git -C {repo_path} commit -m 'Initial commit'") + yield temp_dir + + +@pytest.fixture +def mock_subprocess(): + with patch('subprocess.check_output') as mock_check_output: + yield mock_check_output + + +@pytest.fixture +def mock_os(): + with patch('os.system') as mock_system, patch('os.path.join') as mock_join: + yield mock_system, mock_join + + +@pytest.fixture +def mock_prompt_template(): + return 'Issue: {{ body }}\n\nPlease fix this issue.' + + +@pytest.fixture +def mock_followup_prompt_template(): + return 'Issue context: {{ issues }}\n\nReview comments: {{ review_comments }}\n\nReview threads: {{ review_threads }}\n\nFiles: {{ files }}\n\nThread comments: {{ thread_context }}\n\nPlease fix this issue.' + + +def create_cmd_output(exit_code: int, content: str, command: str): + return CmdOutputObservation( + content=content, + command=command, + metadata=CmdOutputMetadata(exit_code=exit_code), + ) + + +def test_initialize_runtime(): + mock_runtime = MagicMock() + + if os.getenv('GITLAB_CI') == 'true': + mock_runtime.run_action.side_effect = [ + create_cmd_output(exit_code=0, content='', command='cd /workspace'), + create_cmd_output( + exit_code=0, content='', command='sudo chown -R 1001:0 /workspace/*' + ), + create_cmd_output( + exit_code=0, content='', command='git config --global core.pager ""' + ), + ] + else: + mock_runtime.run_action.side_effect = [ + create_cmd_output(exit_code=0, content='', command='cd /workspace'), + create_cmd_output( + exit_code=0, content='', command='git config --global core.pager ""' + ), + ] + + initialize_runtime(mock_runtime, Platform.GITLAB) + + if os.getenv('GITLAB_CI') == 'true': + assert mock_runtime.run_action.call_count == 3 + else: + assert mock_runtime.run_action.call_count == 2 + + mock_runtime.run_action.assert_any_call(CmdRunAction(command='cd /workspace')) + if os.getenv('GITLAB_CI') == 'true': + mock_runtime.run_action.assert_any_call( + CmdRunAction(command='sudo chown -R 1001:0 /workspace/*') + ) + mock_runtime.run_action.assert_any_call( + CmdRunAction(command='git config --global core.pager ""') + ) + + +@pytest.mark.asyncio +async def test_resolve_issue_no_issues_found(): + from openhands.resolver.resolve_issue import resolve_issue + + # Mock dependencies + mock_handler = MagicMock() + mock_handler.get_converted_issues.return_value = [] # Return empty list + + with patch( + 'openhands.resolver.resolve_issue.issue_handler_factory', + return_value=mock_handler, + ): + with pytest.raises(ValueError) as exc_info: + await resolve_issue( + owner='test-owner', + repo='test-repo', + token='test-token', + username='test-user', + platform=Platform.GITLAB, + max_iterations=5, + output_dir='/tmp', + llm_config=LLMConfig(model='test', api_key='test'), + runtime_container_image='test-image', + prompt_template='test-template', + issue_type='pr', + repo_instruction=None, + issue_number=5432, + comment_id=None, + ) + + assert 'No issues found for issue number 5432' in str(exc_info.value) + assert 
'test-owner/test-repo' in str(exc_info.value) + assert 'exists in the repository' in str(exc_info.value) + assert 'correct permissions' in str(exc_info.value) + + +def test_download_issues_from_gitlab(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), llm_config + ) + + mock_issues_response = MagicMock() + mock_issues_response.json.side_effect = [ + [ + {'iid': 1, 'title': 'Issue 1', 'description': 'This is an issue'}, + { + 'iid': 2, + 'title': 'PR 1', + 'description': 'This is a pull request', + 'pull_request': {}, + }, + {'iid': 3, 'title': 'Issue 2', 'description': 'This is another issue'}, + ], + None, + ] + mock_issues_response.raise_for_status = MagicMock() + + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [] + mock_comments_response.raise_for_status = MagicMock() + + def get_mock_response(url, *args, **kwargs): + if '/notes' in url: + return mock_comments_response + return mock_issues_response + + with patch('requests.get', side_effect=get_mock_response): + issues = handler.get_converted_issues(issue_numbers=[1, 3]) + + assert len(issues) == 2 + assert handler.issue_type == 'issue' + assert all(isinstance(issue, Issue) for issue in issues) + assert [issue.number for issue in issues] == [1, 3] + assert [issue.title for issue in issues] == ['Issue 1', 'Issue 2'] + assert [issue.review_comments for issue in issues] == [None, None] + assert [issue.closing_issues for issue in issues] == [None, None] + assert [issue.thread_ids for issue in issues] == [None, None] + + +def test_download_pr_from_gitlab(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextPR(GitlabPRHandler('owner', 'repo', 'token'), llm_config) + mock_pr_response = MagicMock() + mock_pr_response.json.side_effect = [ + [ + { + 'iid': 1, + 'title': 'PR 1', + 'description': 'This is a pull request', + 'source_branch': 'b1', + }, + { + 'iid': 2, + 'title': 'My PR', + 'description': 'This is another pull request', + 'source_branch': 'b2', + }, + { + 'iid': 3, + 'title': 'PR 3', + 'description': 'Final PR', + 'source_branch': 'b3', + }, + ], + None, + ] + mock_pr_response.raise_for_status = MagicMock() + + # Mock for related issues response + mock_related_issuse_response = MagicMock() + mock_related_issuse_response.json.return_value = [ + {'description': 'Issue 1 body', 'iid': 1}, + {'description': 'Issue 2 body', 'iid': 2}, + ] + mock_related_issuse_response.raise_for_status = MagicMock() + + # Mock for PR comments response + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [] # No PR comments + mock_comments_response.raise_for_status = MagicMock() + + # Mock for GraphQL request (for download_pr_metadata) + mock_graphql_response = MagicMock() + mock_graphql_response.json.side_effect = lambda: { + 'data': { + 'project': { + 'mergeRequest': { + 'discussions': { + 'edges': [ + { + 'node': { + 'id': '1', + 'resolved': False, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'body': 'Unresolved comment 1', + 'position': { + 'filePath': '/frontend/header.tsx', + }, + }, + { + 'body': 'Follow up thread', + }, + ] + }, + } + }, + { + 'node': { + 'id': '2', + 'resolved': True, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'body': 'Resolved comment 1', + 'position': { + 'filePath': '/some/file.py', + }, + }, + ] + }, + } + }, + { + 'node': { + 'id': '3', + 'resolved': False, + 'resolvable': True, + 'notes': { + 'nodes': [ + { + 'body': 
'Unresolved comment 3', + 'position': { + 'filePath': '/another/file.py', + }, + }, + ] + }, + } + }, + ] + }, + } + } + } + } + + mock_graphql_response.raise_for_status = MagicMock() + + def get_mock_response(url, *args, **kwargs): + if '/notes' in url: + return mock_comments_response + if '/related_issues' in url: + return mock_related_issuse_response + return mock_pr_response + + with patch('requests.get', side_effect=get_mock_response): + with patch('requests.post', return_value=mock_graphql_response): + issues = handler.get_converted_issues(issue_numbers=[1, 2, 3]) + + assert len(issues) == 3 + assert handler.issue_type == 'pr' + assert all(isinstance(issue, Issue) for issue in issues) + assert [issue.number for issue in issues] == [1, 2, 3] + assert [issue.title for issue in issues] == ['PR 1', 'My PR', 'PR 3'] + assert [issue.head_branch for issue in issues] == ['b1', 'b2', 'b3'] + + assert len(issues[0].review_threads) == 2 # Only unresolved threads + assert ( + issues[0].review_threads[0].comment + == 'Unresolved comment 1\n---\nlatest feedback:\nFollow up thread\n' + ) + assert issues[0].review_threads[0].files == ['/frontend/header.tsx'] + assert ( + issues[0].review_threads[1].comment + == 'latest feedback:\nUnresolved comment 3\n' + ) + assert issues[0].review_threads[1].files == ['/another/file.py'] + assert issues[0].closing_issues == ['Issue 1 body', 'Issue 2 body'] + assert issues[0].thread_ids == ['1', '3'] + + +@pytest.mark.asyncio +async def test_complete_runtime(): + mock_runtime = MagicMock() + mock_runtime.run_action.side_effect = [ + create_cmd_output(exit_code=0, content='', command='cd /workspace'), + create_cmd_output( + exit_code=0, content='', command='git config --global core.pager ""' + ), + create_cmd_output( + exit_code=0, + content='', + command='git config --global --add safe.directory /workspace', + ), + create_cmd_output( + exit_code=0, content='', command='git diff base_commit_hash fix' + ), + create_cmd_output(exit_code=0, content='git diff content', command='git apply'), + ] + + result = await complete_runtime(mock_runtime, 'base_commit_hash', Platform.GITLAB) + + assert result == {'git_patch': 'git diff content'} + assert mock_runtime.run_action.call_count == 5 + + +@pytest.mark.asyncio +async def test_process_issue(mock_output_dir, mock_prompt_template): + # Mock dependencies + mock_create_runtime = MagicMock() + mock_initialize_runtime = AsyncMock() + mock_run_controller = AsyncMock() + mock_complete_runtime = AsyncMock() + handler_instance = MagicMock() + + # Set up test data + issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + ) + base_commit = 'abcdef1234567890' + repo_instruction = 'Resolve this repo' + max_iterations = 5 + llm_config = LLMConfig(model='test_model', api_key='test_api_key') + runtime_container_image = 'test_image:latest' + + # Test cases for different scenarios + test_cases = [ + { + 'name': 'successful_run', + 'run_controller_return': MagicMock( + history=[NullObservation(content='')], + metrics=MagicMock( + get=MagicMock(return_value={'test_result': 'passed'}) + ), + last_error=None, + ), + 'run_controller_raises': None, + 'expected_success': True, + 'expected_error': None, + 'expected_explanation': 'Issue resolved successfully', + }, + { + 'name': 'value_error', + 'run_controller_return': None, + 'run_controller_raises': ValueError('Test value error'), + 'expected_success': False, + 'expected_error': 'Agent failed to run or crashed', + 
'expected_explanation': 'Agent failed to run', + }, + { + 'name': 'runtime_error', + 'run_controller_return': None, + 'run_controller_raises': RuntimeError('Test runtime error'), + 'expected_success': False, + 'expected_error': 'Agent failed to run or crashed', + 'expected_explanation': 'Agent failed to run', + }, + { + 'name': 'json_decode_error', + 'run_controller_return': MagicMock( + history=[NullObservation(content='')], + metrics=MagicMock( + get=MagicMock(return_value={'test_result': 'passed'}) + ), + last_error=None, + ), + 'run_controller_raises': None, + 'expected_success': True, + 'expected_error': None, + 'expected_explanation': 'Non-JSON explanation', + 'is_pr': True, + 'comment_success': [ + True, + False, + ], # To trigger the PR success logging code path + }, + ] + + for test_case in test_cases: + # Reset mocks + mock_create_runtime.reset_mock() + mock_initialize_runtime.reset_mock() + mock_run_controller.reset_mock() + mock_complete_runtime.reset_mock() + handler_instance.reset_mock() + + # Mock return values + mock_create_runtime.return_value = MagicMock(connect=AsyncMock()) + if test_case['run_controller_raises']: + mock_run_controller.side_effect = test_case['run_controller_raises'] + else: + mock_run_controller.return_value = test_case['run_controller_return'] + mock_run_controller.side_effect = None + + mock_complete_runtime.return_value = {'git_patch': 'test patch'} + handler_instance.guess_success.return_value = ( + test_case['expected_success'], + test_case.get('comment_success', None), + test_case['expected_explanation'], + ) + handler_instance.get_instruction.return_value = ('Test instruction', []) + handler_instance.issue_type = 'pr' if test_case.get('is_pr', False) else 'issue' + + with ( + patch( + 'openhands.resolver.resolve_issue.create_runtime', mock_create_runtime + ), + patch( + 'openhands.resolver.resolve_issue.initialize_runtime', + mock_initialize_runtime, + ), + patch( + 'openhands.resolver.resolve_issue.run_controller', mock_run_controller + ), + patch( + 'openhands.resolver.resolve_issue.complete_runtime', + mock_complete_runtime, + ), + patch('openhands.resolver.resolve_issue.logger'), + ): + # Call the function + result = await process_issue( + issue, + Platform.GITLAB, + base_commit, + max_iterations, + llm_config, + mock_output_dir, + runtime_container_image, + mock_prompt_template, + handler_instance, + repo_instruction, + reset_logger=False, + ) + + # Assert the result + expected_issue_type = 'pr' if test_case.get('is_pr', False) else 'issue' + assert handler_instance.issue_type == expected_issue_type + assert isinstance(result, ResolverOutput) + assert result.issue == issue + assert result.base_commit == base_commit + assert result.git_patch == 'test patch' + assert result.success == test_case['expected_success'] + assert result.result_explanation == test_case['expected_explanation'] + assert result.error == test_case['expected_error'] + + # Assert that the mocked functions were called + mock_create_runtime.assert_called_once() + mock_initialize_runtime.assert_called_once() + mock_run_controller.assert_called_once() + mock_complete_runtime.assert_called_once() + + # Assert that guess_success was called only for successful runs + if test_case['expected_success']: + handler_instance.guess_success.assert_called_once() + else: + handler_instance.guess_success.assert_not_called() + + +def test_get_instruction(mock_prompt_template, mock_followup_prompt_template): + issue = Issue( + owner='test_owner', + repo='test_repo', + number=123, + title='Test 
Issue', + body='This is a test issue refer to image ![First Image](https://sampleimage.com/image1.png)', + ) + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + instruction, images_urls = issue_handler.get_instruction( + issue, mock_prompt_template, None + ) + expected_instruction = 'Issue: Test Issue\n\nThis is a test issue refer to image ![First Image](https://sampleimage.com/image1.png)\n\nPlease fix this issue.' + + assert images_urls == ['https://sampleimage.com/image1.png'] + assert issue_handler.issue_type == 'issue' + assert instruction == expected_instruction + + issue = Issue( + owner='test_owner', + repo='test_repo', + number=123, + title='Test Issue', + body='This is a test issue', + closing_issues=['Issue 1 fix the type'], + review_threads=[ + ReviewThread( + comment="There is still a typo 'pthon' instead of 'python'", files=[] + ) + ], + thread_comments=[ + "I've left review comments, please address them", + 'This is a valid concern.', + ], + ) + + pr_handler = ServiceContextPR( + GitlabPRHandler('owner', 'repo', 'token'), mock_llm_config + ) + instruction, images_urls = pr_handler.get_instruction( + issue, mock_followup_prompt_template, None + ) + expected_instruction = "Issue context: [\n \"Issue 1 fix the type\"\n]\n\nReview comments: None\n\nReview threads: [\n \"There is still a typo 'pthon' instead of 'python'\"\n]\n\nFiles: []\n\nThread comments: I've left review comments, please address them\n---\nThis is a valid concern.\n\nPlease fix this issue." + + assert images_urls == [] + assert pr_handler.issue_type == 'pr' + assert instruction == expected_instruction + + +def test_file_instruction(): + issue = Issue( + owner='test_owner', + repo='test_repo', + number=123, + title='Test Issue', + body='This is a test issue ![image](https://sampleimage.com/sample.png)', + ) + # load prompt from openhands/resolver/prompts/resolve/basic.jinja + with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f: + prompt = f.read() + # Test without thread comments + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + instruction, images_urls = issue_handler.get_instruction(issue, prompt, None) + expected_instruction = """Please fix the following issue for the repository in /workspace. +An environment has been set up for you to start working. You may assume all necessary tools are installed. + +# Problem Statement +Test Issue + +This is a test issue ![image](https://sampleimage.com/sample.png) + +IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP. +You SHOULD INCLUDE PROPER INDENTATION in your edit commands. 
+ +When you think you have fixed the issue through code changes, please finish the interaction.""" + + assert instruction == expected_instruction + assert images_urls == ['https://sampleimage.com/sample.png'] + + +def test_file_instruction_with_repo_instruction(): + issue = Issue( + owner='test_owner', + repo='test_repo', + number=123, + title='Test Issue', + body='This is a test issue', + ) + # load prompt from openhands/resolver/prompts/resolve/basic.jinja + with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f: + prompt = f.read() + # load repo instruction from openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt + with open( + 'openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt', + 'r', + ) as f: + repo_instruction = f.read() + + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + instruction, image_urls = issue_handler.get_instruction( + issue, prompt, repo_instruction + ) + expected_instruction = """Please fix the following issue for the repository in /workspace. +An environment has been set up for you to start working. You may assume all necessary tools are installed. + +# Problem Statement +Test Issue + +This is a test issue + +IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP. +You SHOULD INCLUDE PROPER INDENTATION in your edit commands. + +Some basic information about this repository: +This is a Python repo for openhands-resolver, a library that attempts to resolve github issues with the AI agent OpenHands. + +- Setup: `poetry install --with test --with dev` +- Testing: `poetry run pytest tests/test_*.py` + + +When you think you have fixed the issue through code changes, please finish the interaction.""" + assert instruction == expected_instruction + assert issue_handler.issue_type == 'issue' + assert image_urls == [] + + +def test_guess_success(): + mock_issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + ) + mock_history = [create_cmd_output(exit_code=0, content='', command='cd /workspace')] + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + + mock_completion_response = MagicMock() + mock_completion_response.choices = [ + MagicMock( + message=MagicMock( + content='--- success\ntrue\n--- explanation\nIssue resolved successfully' + ) + ) + ] + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + + with patch.object( + LLM, 'completion', MagicMock(return_value=mock_completion_response) + ): + success, comment_success, explanation = issue_handler.guess_success( + mock_issue, mock_history + ) + assert issue_handler.issue_type == 'issue' + assert comment_success is None + assert success + assert explanation == 'Issue resolved successfully' + + +def test_guess_success_with_thread_comments(): + mock_issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + thread_comments=[ + 'First comment', + 'Second comment', + 'latest feedback:\nPlease add tests', + ], + ) + mock_history = [MagicMock(message='I have added tests for this case')] + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + + mock_completion_response = MagicMock() + mock_completion_response.choices = [ + MagicMock( + message=MagicMock( 
+ content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling' + ) + ) + ] + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + + with patch.object( + LLM, 'completion', MagicMock(return_value=mock_completion_response) + ): + success, comment_success, explanation = issue_handler.guess_success( + mock_issue, mock_history + ) + assert issue_handler.issue_type == 'issue' + assert comment_success is None + assert success + assert 'Tests have been added' in explanation + + +def test_instruction_with_thread_comments(): + # Create an issue with thread comments + issue = Issue( + owner='test_owner', + repo='test_repo', + number=123, + title='Test Issue', + body='This is a test issue', + thread_comments=[ + 'First comment', + 'Second comment', + 'latest feedback:\nPlease add tests', + ], + ) + + # Load the basic prompt template + with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f: + prompt = f.read() + + llm_config = LLMConfig(model='test', api_key='test') + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), llm_config + ) + instruction, images_urls = issue_handler.get_instruction(issue, prompt, None) + + # Verify that thread comments are included in the instruction + assert 'First comment' in instruction + assert 'Second comment' in instruction + assert 'Please add tests' in instruction + assert 'Issue Thread Comments:' in instruction + assert images_urls == [] + + +def test_guess_success_failure(): + mock_issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + thread_comments=[ + 'First comment', + 'Second comment', + 'latest feedback:\nPlease add tests', + ], + ) + mock_history = [MagicMock(message='I have added tests for this case')] + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + + mock_completion_response = MagicMock() + mock_completion_response.choices = [ + MagicMock( + message=MagicMock( + content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling' + ) + ) + ] + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + + with patch.object( + LLM, 'completion', MagicMock(return_value=mock_completion_response) + ): + success, comment_success, explanation = issue_handler.guess_success( + mock_issue, mock_history + ) + assert issue_handler.issue_type == 'issue' + assert comment_success is None + assert success + assert 'Tests have been added' in explanation + + +def test_guess_success_negative_case(): + mock_issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + ) + mock_history = [create_cmd_output(exit_code=0, content='', command='cd /workspace')] + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + + mock_completion_response = MagicMock() + mock_completion_response.choices = [ + MagicMock( + message=MagicMock( + content='--- success\nfalse\n--- explanation\nIssue not resolved' + ) + ) + ] + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + + with patch.object( + LLM, 'completion', MagicMock(return_value=mock_completion_response) + ): + success, comment_success, explanation = issue_handler.guess_success( + mock_issue, mock_history + ) + assert issue_handler.issue_type == 'issue' + assert comment_success is None + 
assert not success + assert explanation == 'Issue not resolved' + + +def test_guess_success_invalid_output(): + mock_issue = Issue( + owner='test_owner', + repo='test_repo', + number=1, + title='Test Issue', + body='This is a test issue', + ) + mock_history = [create_cmd_output(exit_code=0, content='', command='cd /workspace')] + mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key') + + mock_completion_response = MagicMock() + mock_completion_response.choices = [ + MagicMock(message=MagicMock(content='This is not a valid output')) + ] + issue_handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), mock_llm_config + ) + + with patch.object( + LLM, 'completion', MagicMock(return_value=mock_completion_response) + ): + success, comment_success, explanation = issue_handler.guess_success( + mock_issue, mock_history + ) + assert issue_handler.issue_type == 'issue' + assert comment_success is None + assert not success + assert ( + explanation + == 'Failed to decode answer from LLM response: This is not a valid output' + ) + + +def test_download_issue_with_specific_comment(): + llm_config = LLMConfig(model='test', api_key='test') + handler = ServiceContextIssue( + GitlabIssueHandler('owner', 'repo', 'token'), llm_config + ) + + # Define the specific comment_id to filter + specific_comment_id = 101 + + # Mock issue and comment responses + mock_issue_response = MagicMock() + mock_issue_response.json.side_effect = [ + [ + {'iid': 1, 'title': 'Issue 1', 'description': 'This is an issue'}, + ], + None, + ] + mock_issue_response.raise_for_status = MagicMock() + + mock_comments_response = MagicMock() + mock_comments_response.json.return_value = [ + { + 'id': specific_comment_id, + 'body': 'Specific comment body', + }, + { + 'id': 102, + 'body': 'Another comment body', + }, + ] + mock_comments_response.raise_for_status = MagicMock() + + def get_mock_response(url, *args, **kwargs): + if '/notes' in url: + return mock_comments_response + + return mock_issue_response + + with patch('requests.get', side_effect=get_mock_response): + issues = handler.get_converted_issues( + issue_numbers=[1], comment_id=specific_comment_id + ) + + assert len(issues) == 1 + assert issues[0].number == 1 + assert issues[0].title == 'Issue 1' + assert issues[0].thread_comments == ['Specific comment body'] + + +if __name__ == '__main__': + pytest.main() diff --git a/tests/unit/resolver/gitlab/test_gitlab_send_pull_request.py b/tests/unit/resolver/gitlab/test_gitlab_send_pull_request.py new file mode 100644 index 000000000000..4b198e471c86 --- /dev/null +++ b/tests/unit/resolver/gitlab/test_gitlab_send_pull_request.py @@ -0,0 +1,1335 @@ +import os +import tempfile +from unittest.mock import MagicMock, call, patch +from urllib.parse import quote + +import pytest + +from openhands.core.config import LLMConfig +from openhands.resolver.interfaces.gitlab import GitlabIssueHandler +from openhands.resolver.interfaces.issue import ReviewThread +from openhands.resolver.resolver_output import Issue, ResolverOutput +from openhands.resolver.send_pull_request import ( + apply_patch, + initialize_repo, + load_single_resolver_output, + make_commit, + process_all_successful_issues, + process_single_issue, + send_pull_request, + update_existing_pull_request, +) +from openhands.resolver.utils import Platform + + +@pytest.fixture +def mock_output_dir(): + with tempfile.TemporaryDirectory() as temp_dir: + repo_path = os.path.join(temp_dir, 'repo') + # Initialize a Gitlab repo in "repo" and add a commit with 
"README.md" + os.makedirs(repo_path) + os.system(f'git init {repo_path}') + readme_path = os.path.join(repo_path, 'README.md') + with open(readme_path, 'w') as f: + f.write('hello world') + os.system(f'git -C {repo_path} add README.md') + os.system(f"git -C {repo_path} commit -m 'Initial commit'") + yield temp_dir + + +@pytest.fixture +def mock_issue(): + return Issue( + number=42, + title='Test Issue', + owner='test-owner', + repo='test-repo', + body='Test body', + ) + + +@pytest.fixture +def mock_llm_config(): + return LLMConfig() + + +def test_load_single_resolver_output(): + mock_output_jsonl = 'tests/unit/resolver/mock_output/output.jsonl' + + # Test loading an existing issue + resolver_output = load_single_resolver_output(mock_output_jsonl, 5) + assert isinstance(resolver_output, ResolverOutput) + assert resolver_output.issue.number == 5 + assert resolver_output.issue.title == 'Add MIT license' + assert resolver_output.issue.owner == 'neubig' + assert resolver_output.issue.repo == 'pr-viewer' + + # Test loading a non-existent issue + with pytest.raises(ValueError): + load_single_resolver_output(mock_output_jsonl, 999) + + +def test_apply_patch(mock_output_dir): + # Create a sample file in the mock repo + sample_file = os.path.join(mock_output_dir, 'sample.txt') + with open(sample_file, 'w') as f: + f.write('Original content') + + # Create a sample patch + patch_content = """ +diff --git a/sample.txt b/sample.txt +index 9daeafb..b02def2 100644 +--- a/sample.txt ++++ b/sample.txt +@@ -1 +1,2 @@ +-Original content ++Updated content ++New line +""" + + # Apply the patch + apply_patch(mock_output_dir, patch_content) + + # Check if the file was updated correctly + with open(sample_file, 'r') as f: + updated_content = f.read() + + assert updated_content.strip() == 'Updated content\nNew line'.strip() + + +def test_apply_patch_preserves_line_endings(mock_output_dir): + # Create sample files with different line endings + unix_file = os.path.join(mock_output_dir, 'unix_style.txt') + dos_file = os.path.join(mock_output_dir, 'dos_style.txt') + + with open(unix_file, 'w', newline='\n') as f: + f.write('Line 1\nLine 2\nLine 3') + + with open(dos_file, 'w', newline='\r\n') as f: + f.write('Line 1\r\nLine 2\r\nLine 3') + + # Create patches for both files + unix_patch = """ +diff --git a/unix_style.txt b/unix_style.txt +index 9daeafb..b02def2 100644 +--- a/unix_style.txt ++++ b/unix_style.txt +@@ -1,3 +1,3 @@ + Line 1 +-Line 2 ++Updated Line 2 + Line 3 +""" + + dos_patch = """ +diff --git a/dos_style.txt b/dos_style.txt +index 9daeafb..b02def2 100644 +--- a/dos_style.txt ++++ b/dos_style.txt +@@ -1,3 +1,3 @@ + Line 1 +-Line 2 ++Updated Line 2 + Line 3 +""" + + # Apply patches + apply_patch(mock_output_dir, unix_patch) + apply_patch(mock_output_dir, dos_patch) + + # Check if line endings are preserved + with open(unix_file, 'rb') as f: + unix_content = f.read() + with open(dos_file, 'rb') as f: + dos_content = f.read() + + assert ( + b'\r\n' not in unix_content + ), 'Unix-style line endings were changed to DOS-style' + assert b'\r\n' in dos_content, 'DOS-style line endings were changed to Unix-style' + + # Check if content was updated correctly + assert unix_content.decode('utf-8').split('\n')[1] == 'Updated Line 2' + assert dos_content.decode('utf-8').split('\r\n')[1] == 'Updated Line 2' + + +def test_apply_patch_create_new_file(mock_output_dir): + # Create a patch that adds a new file + patch_content = """ +diff --git a/new_file.txt b/new_file.txt +new file mode 100644 +index 0000000..3b18e51 +--- 
/dev/null ++++ b/new_file.txt +@@ -0,0 +1 @@ ++hello world +""" + + # Apply the patch + apply_patch(mock_output_dir, patch_content) + + # Check if the new file was created + new_file_path = os.path.join(mock_output_dir, 'new_file.txt') + assert os.path.exists(new_file_path), 'New file was not created' + + # Check if the file content is correct + with open(new_file_path, 'r') as f: + content = f.read().strip() + assert content == 'hello world', 'File content is incorrect' + + +def test_apply_patch_rename_file(mock_output_dir): + # Create a sample file in the mock repo + old_file = os.path.join(mock_output_dir, 'old_name.txt') + with open(old_file, 'w') as f: + f.write('This file will be renamed') + + # Create a patch that renames the file + patch_content = """diff --git a/old_name.txt b/new_name.txt +similarity index 100% +rename from old_name.txt +rename to new_name.txt""" + + # Apply the patch + apply_patch(mock_output_dir, patch_content) + + # Check if the file was renamed + new_file = os.path.join(mock_output_dir, 'new_name.txt') + assert not os.path.exists(old_file), 'Old file still exists' + assert os.path.exists(new_file), 'New file was not created' + + # Check if the content is preserved + with open(new_file, 'r') as f: + content = f.read() + assert content == 'This file will be renamed' + + +def test_apply_patch_delete_file(mock_output_dir): + # Create a sample file in the mock repo + sample_file = os.path.join(mock_output_dir, 'to_be_deleted.txt') + with open(sample_file, 'w') as f: + f.write('This file will be deleted') + + # Create a patch that deletes the file + patch_content = """ +diff --git a/to_be_deleted.txt b/to_be_deleted.txt +deleted file mode 100644 +index 9daeafb..0000000 +--- a/to_be_deleted.txt ++++ /dev/null +@@ -1 +0,0 @@ +-This file will be deleted +""" + + # Apply the patch + apply_patch(mock_output_dir, patch_content) + + # Check if the file was deleted + assert not os.path.exists(sample_file), 'File was not deleted' + + +def test_initialize_repo(mock_output_dir): + issue_type = 'issue' + # Copy the repo to patches + ISSUE_NUMBER = 3 + initialize_repo(mock_output_dir, ISSUE_NUMBER, issue_type) + patches_dir = os.path.join(mock_output_dir, 'patches', f'issue_{ISSUE_NUMBER}') + + # Check if files were copied correctly + assert os.path.exists(os.path.join(patches_dir, 'README.md')) + + # Check file contents + with open(os.path.join(patches_dir, 'README.md'), 'r') as f: + assert f.read() == 'hello world' + + +@patch('openhands.resolver.interfaces.gitlab.GitlabIssueHandler.reply_to_comment') +@patch('requests.post') +@patch('subprocess.run') +@patch('openhands.resolver.send_pull_request.LLM') +def test_update_existing_pull_request( + mock_llm_class, + mock_subprocess_run, + mock_requests_post, + mock_reply_to_comment, +): + # Arrange: Set up test data + issue = Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Test PR', + body='This is a test PR', + thread_ids=['comment1', 'comment2'], + head_branch='test-branch', + ) + token = 'test-token' + username = 'test-user' + patch_dir = '/path/to/patch' + additional_message = '["Fixed bug in function A", "Updated documentation for B"]' + + # Mock the subprocess.run call for git push + mock_subprocess_run.return_value = MagicMock(returncode=0) + + # Mock the requests.post call for adding a PR comment + mock_requests_post.return_value.status_code = 201 + + # Mock LLM instance and completion call + mock_llm_instance = MagicMock() + mock_completion_response = MagicMock() + mock_completion_response.choices = 
[ + MagicMock(message=MagicMock(content='This is an issue resolution.')) + ] + mock_llm_instance.completion.return_value = mock_completion_response + mock_llm_class.return_value = mock_llm_instance + + llm_config = LLMConfig() + + # Act: Call the function without comment_message to test auto-generation + result = update_existing_pull_request( + issue, + token, + username, + Platform.GITLAB, + patch_dir, + llm_config, + comment_message=None, + additional_message=additional_message, + ) + + # Assert: Check if the git push command was executed + push_command = ( + f'git -C {patch_dir} push ' + f'https://{username}:{token}@gitlab.com/' + f'{issue.owner}/{issue.repo}.git {issue.head_branch}' + ) + mock_subprocess_run.assert_called_once_with( + push_command, shell=True, capture_output=True, text=True + ) + + # Assert: Check if the auto-generated comment was posted to the PR + comment_url = f'https://gitlab.com/api/v4/projects/{quote(f'{issue.owner}/{issue.repo}', safe="")}/issues/{issue.number}/notes' + expected_comment = 'This is an issue resolution.' + mock_requests_post.assert_called_once_with( + comment_url, + headers={ + 'Authorization': f'Bearer {token}', + 'Accept': 'application/json', + }, + json={'body': expected_comment}, + ) + + # Assert: Check if the reply_to_comment function was called for each thread ID + mock_reply_to_comment.assert_has_calls( + [ + call(issue.number, 'comment1', 'Fixed bug in function A'), + call(issue.number, 'comment2', 'Updated documentation for B'), + ] + ) + + # Assert: Check the returned PR URL + assert ( + result + == f'https://gitlab.com/{issue.owner}/{issue.repo}/-/merge_requests/{issue.number}' + ) + + +@pytest.mark.parametrize( + 'pr_type,target_branch,pr_title', + [ + ('branch', None, None), + ('draft', None, None), + ('ready', None, None), + ('branch', 'feature', None), + ('draft', 'develop', None), + ('ready', 'staging', None), + ('ready', None, 'Custom PR Title'), + ('draft', 'develop', 'Another Custom Title'), + ], +) +@patch('subprocess.run') +@patch('requests.post') +@patch('requests.get') +def test_send_pull_request( + mock_get, + mock_post, + mock_run, + mock_issue, + mock_llm_config, + mock_output_dir, + pr_type, + target_branch, + pr_title, +): + repo_path = os.path.join(mock_output_dir, 'repo') + + # Mock API responses based on whether target_branch is specified + if target_branch: + mock_get.side_effect = [ + MagicMock(status_code=404), # Branch doesn't exist + MagicMock(status_code=200), # Target branch exists + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + ] + else: + mock_get.side_effect = [ + MagicMock(status_code=404), # Branch doesn't exist + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + ] + + mock_post.return_value.json.return_value = { + 'web_url': 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1', + } + + # Mock subprocess.run calls + mock_run.side_effect = [ + MagicMock(returncode=0), # git checkout -b + MagicMock(returncode=0), # git push + ] + + # Call the function + result = send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type=pr_type, + target_branch=target_branch, + pr_title=pr_title, + ) + + # Assert API calls + expected_get_calls = 2 + if pr_type == 'branch': + expected_get_calls = 3 + + assert mock_get.call_count == expected_get_calls + + # Check branch creation and push + assert 
mock_run.call_count == 2 + checkout_call, push_call = mock_run.call_args_list + + assert checkout_call == call( + ['git', '-C', repo_path, 'checkout', '-b', 'openhands-fix-issue-42'], + capture_output=True, + text=True, + ) + assert push_call == call( + [ + 'git', + '-C', + repo_path, + 'push', + 'https://test-user:test-token@gitlab.com/test-owner/test-repo.git', + 'openhands-fix-issue-42', + ], + capture_output=True, + text=True, + ) + + # Check PR creation based on pr_type + if pr_type == 'branch': + assert ( + result + == 'https://gitlab.com/test-owner/test-repo/-/compare/main...openhands-fix-issue-42' + ) + mock_post.assert_not_called() + else: + assert result == 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1' + mock_post.assert_called_once() + post_data = mock_post.call_args[1]['json'] + expected_title = pr_title if pr_title else 'Fix issue #42: Test Issue' + assert post_data['title'] == expected_title + assert post_data['description'].startswith('This pull request fixes #42.') + assert post_data['source_branch'] == 'openhands-fix-issue-42' + assert post_data['target_branch'] == ( + target_branch if target_branch else 'main' + ) + assert post_data['draft'] == (pr_type == 'draft') + + +@patch('subprocess.run') +@patch('requests.post') +@patch('requests.put') +@patch('requests.get') +def test_send_pull_request_with_reviewer( + mock_get, + mock_put, + mock_post, + mock_run, + mock_issue, + mock_output_dir, + mock_llm_config, +): + repo_path = os.path.join(mock_output_dir, 'repo') + reviewer = 'test-reviewer' + + # Mock API responses + mock_get.side_effect = [ + MagicMock(status_code=404), # Branch doesn't exist + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + MagicMock(json=lambda: [{'id': 123}]), # Get user data + ] + + # Mock PR creation response + mock_post.side_effect = [ + MagicMock( + status_code=200, + json=lambda: { + 'web_url': 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1', + 'iid': 1, + }, + ), # PR creation + ] + + # Mock request reviwers response + mock_put.side_effect = [ + MagicMock(status_code=200), # Reviewer request + ] + + # Mock subprocess.run calls + mock_run.side_effect = [ + MagicMock(returncode=0), # git checkout -b + MagicMock(returncode=0), # git push + ] + + # Call the function with reviewer + result = send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type='ready', + reviewer=reviewer, + ) + + # Assert API calls + assert mock_get.call_count == 3 + assert mock_post.call_count == 1 + assert mock_put.call_count == 1 + + # Check PR creation + pr_create_call = mock_post.call_args_list[0] + assert pr_create_call[1]['json']['title'] == 'Fix issue #42: Test Issue' + + # Check reviewer request + reviewer_request_call = mock_put.call_args_list[0] + assert ( + reviewer_request_call[0][0] + == 'https://gitlab.com/api/v4/projects/test-owner%2Ftest-repo/merge_requests/1' + ) + assert reviewer_request_call[1]['json'] == {'reviewer_ids': [123]} + + # Check the result URL + assert result == 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1' + + +@patch('requests.get') +def test_send_pull_request_invalid_target_branch( + mock_get, mock_issue, mock_output_dir, mock_llm_config +): + """Test that an error is raised when specifying a non-existent target branch""" + repo_path = os.path.join(mock_output_dir, 'repo') + + # Mock API response for non-existent branch + mock_get.side_effect = [ + MagicMock(status_code=404), # Branch 
doesn't exist + MagicMock(status_code=404), # Target branch doesn't exist + ] + + # Test that ValueError is raised when target branch doesn't exist + with pytest.raises( + ValueError, match='Target branch nonexistent-branch does not exist' + ): + send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type='ready', + target_branch='nonexistent-branch', + ) + + # Verify API calls + assert mock_get.call_count == 2 + + +@patch('subprocess.run') +@patch('requests.post') +@patch('requests.get') +def test_send_pull_request_git_push_failure( + mock_get, mock_post, mock_run, mock_issue, mock_output_dir, mock_llm_config +): + repo_path = os.path.join(mock_output_dir, 'repo') + + # Mock API responses + mock_get.return_value = MagicMock(json=lambda: {'default_branch': 'main'}) + + # Mock the subprocess.run calls + mock_run.side_effect = [ + MagicMock(returncode=0), # git checkout -b + MagicMock(returncode=1, stderr='Error: failed to push some refs'), # git push + ] + + # Test that RuntimeError is raised when git push fails + with pytest.raises( + RuntimeError, match='Failed to push changes to the remote repository' + ): + send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type='ready', + ) + + # Assert that subprocess.run was called twice + assert mock_run.call_count == 2 + + # Check the git checkout -b command + checkout_call = mock_run.call_args_list[0] + assert checkout_call[0][0] == [ + 'git', + '-C', + repo_path, + 'checkout', + '-b', + 'openhands-fix-issue-42', + ] + + # Check the git push command + push_call = mock_run.call_args_list[1] + assert push_call[0][0] == [ + 'git', + '-C', + repo_path, + 'push', + 'https://test-user:test-token@gitlab.com/test-owner/test-repo.git', + 'openhands-fix-issue-42', + ] + + # Assert that no pull request was created + mock_post.assert_not_called() + + +@patch('subprocess.run') +@patch('requests.post') +@patch('requests.get') +def test_send_pull_request_permission_error( + mock_get, mock_post, mock_run, mock_issue, mock_output_dir, mock_llm_config +): + repo_path = os.path.join(mock_output_dir, 'repo') + + # Mock API responses + mock_get.return_value = MagicMock(json=lambda: {'default_branch': 'main'}) + mock_post.return_value.status_code = 403 + + # Mock subprocess.run calls + mock_run.side_effect = [ + MagicMock(returncode=0), # git checkout -b + MagicMock(returncode=0), # git push + ] + + # Test that RuntimeError is raised when PR creation fails due to permissions + with pytest.raises( + RuntimeError, match='Failed to create pull request due to missing permissions.' + ): + send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type='ready', + ) + + # Assert that the branch was created and pushed + assert mock_run.call_count == 2 + mock_post.assert_called_once() + + +@patch('requests.post') +@patch('requests.get') +def test_reply_to_comment(mock_get, mock_post, mock_issue): + # Arrange: set up the test data + token = 'test_token' + comment_id = 'GID/test_comment_id' + reply = 'This is a test reply.' 
+ + # Create an instance of GitlabIssueHandler + handler = GitlabIssueHandler( + owner='test-owner', repo='test-repo', token=token, username='test-user' + ) + + mock_get.return_value = MagicMock( + json=lambda: { + 'notes': [ + { + 'id': 123, + } + ] + } + ) + + # Mock the response from the GraphQL API + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + 'id': 123, + 'body': 'Openhands fix success summary\n\n\nThis is a test reply.', + 'createdAt': '2024-10-01T12:34:56Z', + } + + mock_post.return_value = mock_response + + # Act: call the function + handler.reply_to_comment(mock_issue.number, comment_id, reply) + + # Assert: check that the POST request was made with the correct parameters + data = { + 'body': 'Openhands fix success summary\n\n\nThis is a test reply.', + 'note_id': 123, + } + + # Check that the correct request was made to the API + mock_post.assert_called_once_with( + f'https://gitlab.com/api/v4/projects/{quote(f'{mock_issue.owner}/{mock_issue.repo}', safe="")}/merge_requests/{mock_issue.number}/discussions/{comment_id.split('/')[-1]}/notes', + headers={ + 'Authorization': f'Bearer {token}', + 'Accept': 'application/json', + }, + json=data, + ) + + # Check that the response status was checked (via response.raise_for_status) + mock_response.raise_for_status.assert_called_once() + + +@patch('openhands.resolver.send_pull_request.initialize_repo') +@patch('openhands.resolver.send_pull_request.apply_patch') +@patch('openhands.resolver.send_pull_request.update_existing_pull_request') +@patch('openhands.resolver.send_pull_request.make_commit') +def test_process_single_pr_update( + mock_make_commit, + mock_update_existing_pull_request, + mock_apply_patch, + mock_initialize_repo, + mock_output_dir, + mock_llm_config, +): + # Initialize test data + token = 'test_token' + username = 'test_user' + pr_type = 'draft' + + resolver_output = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Issue 1', + body='Body 1', + closing_issues=[], + review_threads=[ + ReviewThread(comment='review comment for feedback', files=[]) + ], + thread_ids=['1'], + head_branch='branch 1', + ), + issue_type='pr', + instruction='Test instruction 1', + base_commit='def456', + git_patch='Test patch 1', + history=[], + metrics={}, + success=True, + comment_success=None, + result_explanation='[Test success 1]', + error=None, + ) + + mock_update_existing_pull_request.return_value = ( + 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1' + ) + mock_initialize_repo.return_value = f'{mock_output_dir}/patches/pr_1' + + process_single_issue( + mock_output_dir, + resolver_output, + token, + username, + Platform.GITLAB, + pr_type, + mock_llm_config, + None, + False, + None, + ) + + mock_initialize_repo.assert_called_once_with(mock_output_dir, 1, 'pr', 'branch 1') + mock_apply_patch.assert_called_once_with( + f'{mock_output_dir}/patches/pr_1', resolver_output.git_patch + ) + mock_make_commit.assert_called_once_with( + f'{mock_output_dir}/patches/pr_1', resolver_output.issue, 'pr' + ) + mock_update_existing_pull_request.assert_called_once_with( + issue=resolver_output.issue, + token=token, + username=username, + platform=Platform.GITLAB, + patch_dir=f'{mock_output_dir}/patches/pr_1', + additional_message='[Test success 1]', + llm_config=mock_llm_config, + ) + + +@patch('openhands.resolver.send_pull_request.initialize_repo') +@patch('openhands.resolver.send_pull_request.apply_patch') 
+@patch('openhands.resolver.send_pull_request.send_pull_request') +@patch('openhands.resolver.send_pull_request.make_commit') +def test_process_single_issue( + mock_make_commit, + mock_send_pull_request, + mock_apply_patch, + mock_initialize_repo, + mock_output_dir, + mock_llm_config, +): + # Initialize test data + token = 'test_token' + username = 'test_user' + pr_type = 'draft' + platform = Platform.GITLAB + + resolver_output = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Issue 1', + body='Body 1', + ), + issue_type='issue', + instruction='Test instruction 1', + base_commit='def456', + git_patch='Test patch 1', + history=[], + metrics={}, + success=True, + comment_success=None, + result_explanation='Test success 1', + error=None, + ) + + # Mock return value + mock_send_pull_request.return_value = ( + 'https://gitlab.com/test-owner/test-repo/-/merge_requests/1' + ) + mock_initialize_repo.return_value = f'{mock_output_dir}/patches/issue_1' + + # Call the function + process_single_issue( + mock_output_dir, + resolver_output, + token, + username, + platform, + pr_type, + mock_llm_config, + None, + False, + None, + ) + + # Assert that the mocked functions were called with correct arguments + mock_initialize_repo.assert_called_once_with(mock_output_dir, 1, 'issue', 'def456') + mock_apply_patch.assert_called_once_with( + f'{mock_output_dir}/patches/issue_1', resolver_output.git_patch + ) + mock_make_commit.assert_called_once_with( + f'{mock_output_dir}/patches/issue_1', resolver_output.issue, 'issue' + ) + mock_send_pull_request.assert_called_once_with( + issue=resolver_output.issue, + token=token, + username=username, + platform=platform, + patch_dir=f'{mock_output_dir}/patches/issue_1', + pr_type=pr_type, + fork_owner=None, + additional_message=resolver_output.result_explanation, + target_branch=None, + reviewer=None, + pr_title=None, + ) + + +@patch('openhands.resolver.send_pull_request.initialize_repo') +@patch('openhands.resolver.send_pull_request.apply_patch') +@patch('openhands.resolver.send_pull_request.send_pull_request') +@patch('openhands.resolver.send_pull_request.make_commit') +def test_process_single_issue_unsuccessful( + mock_make_commit, + mock_send_pull_request, + mock_apply_patch, + mock_initialize_repo, + mock_output_dir, + mock_llm_config, +): + # Initialize test data + token = 'test_token' + username = 'test_user' + pr_type = 'draft' + + resolver_output = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Issue 1', + body='Body 1', + ), + issue_type='issue', + instruction='Test instruction 1', + base_commit='def456', + git_patch='Test patch 1', + history=[], + metrics={}, + success=False, + comment_success=None, + result_explanation='', + error='Test error', + ) + + # Call the function + process_single_issue( + mock_output_dir, + resolver_output, + token, + username, + Platform.GITLAB, + pr_type, + mock_llm_config, + None, + False, + None, + ) + + # Assert that none of the mocked functions were called + mock_initialize_repo.assert_not_called() + mock_apply_patch.assert_not_called() + mock_make_commit.assert_not_called() + mock_send_pull_request.assert_not_called() + + +@patch('openhands.resolver.send_pull_request.load_all_resolver_outputs') +@patch('openhands.resolver.send_pull_request.process_single_issue') +def test_process_all_successful_issues( + mock_process_single_issue, mock_load_all_resolver_outputs, mock_llm_config +): + # Create ResolverOutput objects with properly 
initialized GitlabIssue instances + resolver_output_1 = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=1, + title='Issue 1', + body='Body 1', + ), + issue_type='issue', + instruction='Test instruction 1', + base_commit='def456', + git_patch='Test patch 1', + history=[], + metrics={}, + success=True, + comment_success=None, + result_explanation='Test success 1', + error=None, + ) + + resolver_output_2 = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=2, + title='Issue 2', + body='Body 2', + ), + issue_type='issue', + instruction='Test instruction 2', + base_commit='ghi789', + git_patch='Test patch 2', + history=[], + metrics={}, + success=False, + comment_success=None, + result_explanation='', + error='Test error 2', + ) + + resolver_output_3 = ResolverOutput( + issue=Issue( + owner='test-owner', + repo='test-repo', + number=3, + title='Issue 3', + body='Body 3', + ), + issue_type='issue', + instruction='Test instruction 3', + base_commit='jkl012', + git_patch='Test patch 3', + history=[], + metrics={}, + success=True, + comment_success=None, + result_explanation='Test success 3', + error=None, + ) + + mock_load_all_resolver_outputs.return_value = [ + resolver_output_1, + resolver_output_2, + resolver_output_3, + ] + + # Call the function + process_all_successful_issues( + 'output_dir', + 'token', + 'username', + Platform.GITLAB, + 'draft', + mock_llm_config, # llm_config + None, # fork_owner + ) + + # Assert that process_single_issue was called for successful issues only + assert mock_process_single_issue.call_count == 2 + + # Check that the function was called with the correct arguments for successful issues + mock_process_single_issue.assert_has_calls( + [ + call( + 'output_dir', + resolver_output_1, + 'token', + 'username', + Platform.GITLAB, + 'draft', + mock_llm_config, + None, + False, + None, + ), + call( + 'output_dir', + resolver_output_3, + 'token', + 'username', + Platform.GITLAB, + 'draft', + mock_llm_config, + None, + False, + None, + ), + ] + ) + + # Add more assertions as needed to verify the behavior of the function + + +@patch('requests.get') +@patch('subprocess.run') +def test_send_pull_request_branch_naming( + mock_run, mock_get, mock_issue, mock_output_dir, mock_llm_config +): + repo_path = os.path.join(mock_output_dir, 'repo') + + # Mock API responses + mock_get.side_effect = [ + MagicMock(status_code=200), # First branch exists + MagicMock(status_code=200), # Second branch exists + MagicMock(status_code=404), # Third branch doesn't exist + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + MagicMock(json=lambda: {'default_branch': 'main'}), # Get default branch + ] + + # Mock subprocess.run calls + mock_run.side_effect = [ + MagicMock(returncode=0), # git checkout -b + MagicMock(returncode=0), # git push + ] + + # Call the function + result = send_pull_request( + issue=mock_issue, + token='test-token', + username='test-user', + platform=Platform.GITLAB, + patch_dir=repo_path, + pr_type='branch', + ) + + # Assert API calls + assert mock_get.call_count == 5 + + # Check branch creation and push + assert mock_run.call_count == 2 + checkout_call, push_call = mock_run.call_args_list + + assert checkout_call == call( + ['git', '-C', repo_path, 'checkout', '-b', 'openhands-fix-issue-42-try3'], + capture_output=True, + text=True, + ) + assert push_call == call( + [ + 'git', + '-C', + repo_path, + 'push', + 'https://test-user:test-token@gitlab.com/test-owner/test-repo.git', + 
'openhands-fix-issue-42-try3', + ], + capture_output=True, + text=True, + ) + + # Check the result + assert ( + result + == 'https://gitlab.com/test-owner/test-repo/-/compare/main...openhands-fix-issue-42-try3' + ) + + +@patch('openhands.resolver.send_pull_request.argparse.ArgumentParser') +@patch('openhands.resolver.send_pull_request.process_all_successful_issues') +@patch('openhands.resolver.send_pull_request.process_single_issue') +@patch('openhands.resolver.send_pull_request.load_single_resolver_output') +@patch('openhands.resolver.send_pull_request.identify_token') +@patch('os.path.exists') +@patch('os.getenv') +def test_main( + mock_getenv, + mock_path_exists, + mock_identify_token, + mock_load_single_resolver_output, + mock_process_single_issue, + mock_process_all_successful_issues, + mock_parser, +): + from openhands.resolver.send_pull_request import main + + # Setup mock parser + mock_args = MagicMock() + mock_args.token = None + mock_args.username = 'mock_username' + mock_args.output_dir = '/mock/output' + mock_args.pr_type = 'draft' + mock_args.issue_number = '42' + mock_args.fork_owner = None + mock_args.send_on_failure = False + mock_args.llm_model = 'mock_model' + mock_args.llm_base_url = 'mock_url' + mock_args.llm_api_key = 'mock_key' + mock_args.target_branch = None + mock_args.reviewer = None + mock_args.pr_title = None + mock_parser.return_value.parse_args.return_value = mock_args + + # Setup environment variables + mock_getenv.side_effect = ( + lambda key, default=None: 'mock_token' if key == 'GITLAB_TOKEN' else default + ) + + # Setup path exists + mock_path_exists.return_value = True + + # Setup mock resolver output + mock_resolver_output = MagicMock() + mock_load_single_resolver_output.return_value = mock_resolver_output + + mock_identify_token.return_value = Platform.GITLAB + + # Run main function + main() + + mock_identify_token.assert_called_with('mock_token') + + llm_config = LLMConfig( + model=mock_args.llm_model, + base_url=mock_args.llm_base_url, + api_key=mock_args.llm_api_key, + ) + + # Use any_call instead of assert_called_with for more flexible matching + assert mock_process_single_issue.call_args == call( + '/mock/output', + mock_resolver_output, + 'mock_token', + 'mock_username', + Platform.GITLAB, + 'draft', + llm_config, + None, + False, + mock_args.target_branch, + mock_args.reviewer, + mock_args.pr_title, + ) + + # Other assertions + mock_parser.assert_called_once() + mock_getenv.assert_any_call('GITLAB_TOKEN') + mock_path_exists.assert_called_with('/mock/output') + mock_load_single_resolver_output.assert_called_with('/mock/output/output.jsonl', 42) + + # Test for 'all_successful' issue number + mock_args.issue_number = 'all_successful' + main() + mock_process_all_successful_issues.assert_called_with( + '/mock/output', + 'mock_token', + 'mock_username', + Platform.GITLAB, + 'draft', + llm_config, + None, + ) + + # Test for invalid issue number + mock_args.issue_number = 'invalid' + with pytest.raises(ValueError): + main() + + # Test for invalid token + mock_identify_token.return_value = Platform.INVALID + with pytest.raises(ValueError, match='Token is invalid.'): + main() + + +@patch('subprocess.run') +def test_make_commit_escapes_issue_title(mock_subprocess_run): + # Setup + repo_dir = '/path/to/repo' + issue = Issue( + owner='test-owner', + repo='test-repo', + number=42, + title='Issue with "quotes" and $pecial characters', + body='Test body', + ) + + # Mock subprocess.run to return success for all calls + mock_subprocess_run.return_value = 
MagicMock( + returncode=0, stdout='sample output', stderr='' + ) + + # Call the function + issue_type = 'issue' + make_commit(repo_dir, issue, issue_type) + + # Assert that subprocess.run was called with the correct arguments + calls = mock_subprocess_run.call_args_list + assert len(calls) == 4 # git config check, git add, git commit + + # Check the git commit call + git_commit_call = calls[3][0][0] + expected_commit_message = ( + 'Fix issue #42: Issue with "quotes" and $pecial characters' + ) + assert [ + 'git', + '-C', + '/path/to/repo', + 'commit', + '-m', + expected_commit_message, + ] == git_commit_call + + +@patch('subprocess.run') +def test_make_commit_no_changes(mock_subprocess_run): + # Setup + repo_dir = '/path/to/repo' + issue = Issue( + owner='test-owner', + repo='test-repo', + number=42, + title='Issue with no changes', + body='Test body', + ) + + # Mock subprocess.run to simulate no changes in the repo + mock_subprocess_run.side_effect = [ + MagicMock(returncode=0), + MagicMock(returncode=0), + MagicMock(returncode=1, stdout=''), # git status --porcelain (no changes) + ] + + with pytest.raises( + RuntimeError, match='ERROR: Openhands failed to make code changes.' + ): + make_commit(repo_dir, issue, 'issue') + + # Check that subprocess.run was called for checking git status and add, but not commit + assert mock_subprocess_run.call_count == 3 + git_status_call = mock_subprocess_run.call_args_list[2][0][0] + assert f'git -C {repo_dir} status --porcelain' in git_status_call + + +def test_apply_patch_rename_directory(mock_output_dir): + # Create a sample directory structure + old_dir = os.path.join(mock_output_dir, 'prompts', 'resolve') + os.makedirs(old_dir) + + # Create test files + test_files = [ + 'issue-success-check.jinja', + 'pr-feedback-check.jinja', + 'pr-thread-check.jinja', + ] + for filename in test_files: + file_path = os.path.join(old_dir, filename) + with open(file_path, 'w') as f: + f.write(f'Content of {filename}') + + # Create a patch that renames the directory + patch_content = """diff --git a/prompts/resolve/issue-success-check.jinja b/prompts/guess_success/issue-success-check.jinja +similarity index 100% +rename from prompts/resolve/issue-success-check.jinja +rename to prompts/guess_success/issue-success-check.jinja +diff --git a/prompts/resolve/pr-feedback-check.jinja b/prompts/guess_success/pr-feedback-check.jinja +similarity index 100% +rename from prompts/resolve/pr-feedback-check.jinja +rename to prompts/guess_success/pr-feedback-check.jinja +diff --git a/prompts/resolve/pr-thread-check.jinja b/prompts/guess_success/pr-thread-check.jinja +similarity index 100% +rename from prompts/resolve/pr-thread-check.jinja +rename to prompts/guess_success/pr-thread-check.jinja""" + + # Apply the patch + apply_patch(mock_output_dir, patch_content) + + # Check if files were moved correctly + new_dir = os.path.join(mock_output_dir, 'prompts', 'guess_success') + assert not os.path.exists(old_dir), 'Old directory still exists' + assert os.path.exists(new_dir), 'New directory was not created' + + # Check if all files were moved and content preserved + for filename in test_files: + old_path = os.path.join(old_dir, filename) + new_path = os.path.join(new_dir, filename) + assert not os.path.exists(old_path), f'Old file {filename} still exists' + assert os.path.exists(new_path), f'New file {filename} was not created' + with open(new_path, 'r') as f: + content = f.read() + assert content == f'Content of {filename}', f'Content mismatch for {filename}' diff --git 
a/tests/unit/resolver/test_issue_references.py b/tests/unit/resolver/test_issue_references.py index 409f276d5abc..0a117492bf01 100644 --- a/tests/unit/resolver/test_issue_references.py +++ b/tests/unit/resolver/test_issue_references.py @@ -1,19 +1,15 @@ -from openhands.core.config.llm_config import LLMConfig -from openhands.resolver.issue_definitions import IssueHandler +from openhands.resolver.utils import extract_issue_references def test_extract_issue_references(): - llm_config = LLMConfig(model='test', api_key='test') - handler = IssueHandler('test-owner', 'test-repo', 'test-token', llm_config) - # Test basic issue reference - assert handler._extract_issue_references('Fixes #123') == [123] + assert extract_issue_references('Fixes #123') == [123] # Test multiple issue references - assert handler._extract_issue_references('Fixes #123, #456') == [123, 456] + assert extract_issue_references('Fixes #123, #456') == [123, 456] # Test issue references in code blocks should be ignored - assert handler._extract_issue_references(""" + assert extract_issue_references(""" Here's a code block: ```python # This is a comment with #123 @@ -24,21 +20,37 @@ def func(): """) == [789] # Test issue references in inline code should be ignored - assert handler._extract_issue_references( + assert extract_issue_references( + 'This `#123` should be ignored but #456 should be extracted' + ) == [456] + assert extract_issue_references( 'This `#123` should be ignored but #456 should be extracted' ) == [456] # Test issue references in URLs should be ignored - assert handler._extract_issue_references( + assert extract_issue_references( + 'Check http://example.com/#123 but #456 should be extracted' + ) == [456] + assert extract_issue_references( 'Check http://example.com/#123 but #456 should be extracted' ) == [456] # Test issue references in markdown links should be extracted - assert handler._extract_issue_references( - '[Link to #123](http://example.com) and #456' - ) == [123, 456] + assert extract_issue_references('[Link to #123](http://example.com) and #456') == [ + 123, + 456, + ] + assert extract_issue_references('[Link to #123](http://example.com) and #456') == [ + 123, + 456, + ] # Test issue references with text around them - assert handler._extract_issue_references( - 'Issue #123 is fixed and #456 is pending' - ) == [123, 456] + assert extract_issue_references('Issue #123 is fixed and #456 is pending') == [ + 123, + 456, + ] + assert extract_issue_references('Issue #123 is fixed and #456 is pending') == [ + 123, + 456, + ] From edd51102ad907652439ab56fda5154cf7dab3c81 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Thu, 13 Feb 2025 19:29:51 -0500 Subject: [PATCH 10/44] fix: Simplify nested f-string to fix pydoc-markdown parsing (#6717) --- openhands/resolver/interfaces/gitlab.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/openhands/resolver/interfaces/gitlab.py b/openhands/resolver/interfaces/gitlab.py index 0b2937170910..52661d93032d 100644 --- a/openhands/resolver/interfaces/gitlab.py +++ b/openhands/resolver/interfaces/gitlab.py @@ -33,7 +33,8 @@ def get_headers(self): } def get_base_url(self): - return f'https://gitlab.com/api/v4/projects/{quote(f'{self.owner}/{self.repo}', safe="")}' + project_path = quote(f'{self.owner}/{self.repo}', safe="") + return f'https://gitlab.com/api/v4/projects/{project_path}' def get_authorize_url(self): return f'https://{self.username}:{self.token}@gitlab.com/' @@ -45,9 +46,9 @@ def get_download_url(self): return 
f'{self.base_url}/issues' def get_clone_url(self): - username_and_token = ( - f'{self.username}:{self.token}' if self.username else f'{self.token}' - ) + username_and_token = self.token + if self.username: + username_and_token = f'{self.username}:{self.token}' return f'https://{username_and_token}@gitlab.com/{self.owner}/{self.repo}.git' def get_graphql_url(self): @@ -360,7 +361,8 @@ def download_pr_metadata( } """ - variables = {'projectPath': f'{self.owner}/{self.repo}', 'pr': f'{pull_number}'} + project_path = f'{self.owner}/{self.repo}' + variables = {'projectPath': project_path, 'pr': str(pull_number)} response = requests.post( self.get_graphql_url(), From 85e3a00d9d9c24665f73975dc4078ae455dac5b2 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Thu, 13 Feb 2025 23:31:22 -0500 Subject: [PATCH 11/44] hotfix(Resolver): Workflow definition is out of sync with released package (#6719) --- .github/workflows/openhands-resolver.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/openhands-resolver.yml b/.github/workflows/openhands-resolver.yml index 863896cf9213..dbfd678fc162 100644 --- a/.github/workflows/openhands-resolver.yml +++ b/.github/workflows/openhands-resolver.yml @@ -231,7 +231,7 @@ jobs: - name: Attempt to resolve issue env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} - GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} @@ -267,7 +267,7 @@ jobs: if: always() # Create PR or branch even if the previous steps fail env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} - GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} From 0c03e257b7b5e8bb51ed0cc85a09e8db1214b790 Mon Sep 17 00:00:00 2001 From: "sp.wack" <83104063+amanape@users.noreply.github.com> Date: Fri, 14 Feb 2025 15:11:18 +0400 Subject: [PATCH 12/44] feat(frontend): Settings screen (#6550) --- .../account-settings-context-menu.test.tsx | 23 +- .../analytics-consent-form-modal.test.tsx | 23 +- .../features/sidebar/sidebar.test.tsx | 162 +--- .../settings/account-settings-modal.test.tsx | 174 ---- .../modals/settings/brand-button.test.tsx | 39 + .../modals/settings/model-selector.test.tsx | 1 - .../settings/settings-input.test.tsx | 88 ++ .../settings/settings-switch.test.tsx | 64 ++ .../modals/settings/settings-form.test.tsx | 68 +- .../components/user-actions.test.tsx | 59 +- .../hooks/mutation/use-save-settings.test.tsx | 36 + frontend/__tests__/i18n/translations.test.tsx | 25 +- frontend/__tests__/routes/home.test.tsx | 114 +++ frontend/__tests__/routes/settings.test.tsx | 873 ++++++++++++++++++ .../utils/has-advanced-settings-set.test.ts | 56 ++ .../__tests__/utils/is-custom-model.test.ts | 20 + frontend/src/api/open-hands.ts | 6 +- .../analytics-consent-form-modal.tsx | 3 +- .../account-settings-context-menu.tsx | 10 - .../github-repositories-suggestion-box.tsx | 55 +- .../features/settings/brand-button.tsx | 39 + .../features/settings/help-link.tsx | 22 + .../features/settings/key-status-icon.tsx | 16 + .../features/settings/optional-tag.tsx | 3 + .../settings/settings-dropdown-input.tsx | 56 ++ 
.../features/settings/settings-input.tsx | 50 + .../features/settings/settings-switch.tsx | 50 + .../settings/styled-switch-component.tsx | 26 + .../components/features/sidebar/sidebar.tsx | 86 +- .../features/sidebar/user-actions.tsx | 13 +- .../features/sidebar/user-avatar.tsx | 11 +- .../shared/buttons/all-hands-logo-button.tsx | 2 +- .../components/shared/buttons/docs-button.tsx | 4 +- .../shared/buttons/exit-project-button.tsx | 4 +- .../shared/buttons/settings-button.tsx | 7 +- .../account-settings-form.tsx | 160 ---- .../account-settings-modal.tsx | 14 - .../account-settings/github-token-input.tsx | 39 - .../shared/modals/settings/model-selector.tsx | 202 ++-- .../shared/modals/settings/settings-form.tsx | 195 +--- .../shared/modals/settings/settings-modal.tsx | 17 +- frontend/src/context/settings-context.tsx | 10 +- .../src/hooks/mutation/use-save-settings.ts | 13 +- frontend/src/hooks/query/use-settings.ts | 14 +- frontend/src/hooks/use-app-logout.ts | 16 + frontend/src/icons/academy.svg | 4 + frontend/src/icons/plus.svg | 11 + frontend/src/icons/profile.svg | 11 + frontend/src/icons/settings.svg | 4 + frontend/src/icons/success.svg | 4 + frontend/src/icons/warning.svg | 5 + frontend/src/mocks/handlers.ts | 4 + frontend/src/query-client-config.ts | 20 +- frontend/src/routes.ts | 1 + frontend/src/routes/settings.tsx | 452 +++++++++ frontend/src/types/react-query.d.ts | 7 + frontend/src/types/settings.ts | 4 +- .../src/utils/has-advanced-settings-set.ts | 10 + frontend/src/utils/is-custom-model.ts | 22 + frontend/src/utils/settings-utils.ts | 6 +- 60 files changed, 2429 insertions(+), 1104 deletions(-) delete mode 100644 frontend/__tests__/components/modals/settings/account-settings-modal.test.tsx create mode 100644 frontend/__tests__/components/modals/settings/brand-button.test.tsx create mode 100644 frontend/__tests__/components/settings/settings-input.test.tsx create mode 100644 frontend/__tests__/components/settings/settings-switch.test.tsx create mode 100644 frontend/__tests__/hooks/mutation/use-save-settings.test.tsx create mode 100644 frontend/__tests__/routes/home.test.tsx create mode 100644 frontend/__tests__/routes/settings.test.tsx create mode 100644 frontend/__tests__/utils/has-advanced-settings-set.test.ts create mode 100644 frontend/__tests__/utils/is-custom-model.test.ts create mode 100644 frontend/src/components/features/settings/brand-button.tsx create mode 100644 frontend/src/components/features/settings/help-link.tsx create mode 100644 frontend/src/components/features/settings/key-status-icon.tsx create mode 100644 frontend/src/components/features/settings/optional-tag.tsx create mode 100644 frontend/src/components/features/settings/settings-dropdown-input.tsx create mode 100644 frontend/src/components/features/settings/settings-input.tsx create mode 100644 frontend/src/components/features/settings/settings-switch.tsx create mode 100644 frontend/src/components/features/settings/styled-switch-component.tsx delete mode 100644 frontend/src/components/shared/modals/account-settings/account-settings-form.tsx delete mode 100644 frontend/src/components/shared/modals/account-settings/account-settings-modal.tsx delete mode 100644 frontend/src/components/shared/modals/account-settings/github-token-input.tsx create mode 100644 frontend/src/hooks/use-app-logout.ts create mode 100644 frontend/src/icons/academy.svg create mode 100644 frontend/src/icons/plus.svg create mode 100644 frontend/src/icons/profile.svg create mode 100644 frontend/src/icons/settings.svg create mode 
100644 frontend/src/icons/success.svg create mode 100644 frontend/src/icons/warning.svg create mode 100644 frontend/src/routes/settings.tsx create mode 100644 frontend/src/utils/has-advanced-settings-set.ts create mode 100644 frontend/src/utils/is-custom-model.ts diff --git a/frontend/__tests__/components/context-menu/account-settings-context-menu.test.tsx b/frontend/__tests__/components/context-menu/account-settings-context-menu.test.tsx index 89780e07aef7..00ac10532202 100644 --- a/frontend/__tests__/components/context-menu/account-settings-context-menu.test.tsx +++ b/frontend/__tests__/components/context-menu/account-settings-context-menu.test.tsx @@ -18,7 +18,6 @@ describe("AccountSettingsContextMenu", () => { it("should always render the right options", () => { render( { expect( screen.getByTestId("account-settings-context-menu"), ).toBeInTheDocument(); - expect(screen.getByText("ACCOUNT_SETTINGS$SETTINGS")).toBeInTheDocument(); expect(screen.getByText("ACCOUNT_SETTINGS$LOGOUT")).toBeInTheDocument(); }); - it("should call onClickAccountSettings when the account settings option is clicked", async () => { - render( - , - ); - - const accountSettingsOption = screen.getByText("ACCOUNT_SETTINGS$SETTINGS"); - await user.click(accountSettingsOption); - - expect(onClickAccountSettingsMock).toHaveBeenCalledOnce(); - }); - it("should call onLogout when the logout option is clicked", async () => { render( { test("onLogout should be disabled if the user is not logged in", async () => { render( { it("should call onClose when clicking outside of the element", async () => { render( , ); - const accountSettingsButton = screen.getByText("ACCOUNT_SETTINGS$SETTINGS"); + const accountSettingsButton = screen.getByText("ACCOUNT_SETTINGS$LOGOUT"); await user.click(accountSettingsButton); await user.click(document.body); diff --git a/frontend/__tests__/components/features/analytics/analytics-consent-form-modal.test.tsx b/frontend/__tests__/components/features/analytics/analytics-consent-form-modal.test.tsx index 01f7f5a1378a..4ebfbe3ba57e 100644 --- a/frontend/__tests__/components/features/analytics/analytics-consent-form-modal.test.tsx +++ b/frontend/__tests__/components/features/analytics/analytics-consent-form-modal.test.tsx @@ -1,6 +1,6 @@ import userEvent from "@testing-library/user-event"; import { describe, expect, it, vi } from "vitest"; -import { render, screen } from "@testing-library/react"; +import { render, screen, waitFor } from "@testing-library/react"; import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; import { AnalyticsConsentFormModal } from "#/components/features/analytics/analytics-consent-form-modal"; import OpenHands from "#/api/open-hands"; @@ -8,7 +8,7 @@ import { SettingsProvider } from "#/context/settings-context"; import { AuthProvider } from "#/context/auth-context"; describe("AnalyticsConsentFormModal", () => { - it("should call saveUserSettings with default settings on confirm reset settings", async () => { + it("should call saveUserSettings with consent", async () => { const user = userEvent.setup(); const onCloseMock = vi.fn(); const saveUserSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); @@ -26,20 +26,9 @@ describe("AnalyticsConsentFormModal", () => { const confirmButton = screen.getByTestId("confirm-preferences"); await user.click(confirmButton); - expect(saveUserSettingsSpy).toHaveBeenCalledWith({ - user_consents_to_analytics: true, - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - github_token: undefined, - 
language: "en", - llm_api_key: undefined, - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - unset_github_token: undefined, - }); - expect(onCloseMock).toHaveBeenCalled(); + expect(saveUserSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ user_consents_to_analytics: true }), + ); + await waitFor(() => expect(onCloseMock).toHaveBeenCalled()); }); }); diff --git a/frontend/__tests__/components/features/sidebar/sidebar.test.tsx b/frontend/__tests__/components/features/sidebar/sidebar.test.tsx index 62e2c05e047a..0039a1819b45 100644 --- a/frontend/__tests__/components/features/sidebar/sidebar.test.tsx +++ b/frontend/__tests__/components/features/sidebar/sidebar.test.tsx @@ -1,9 +1,6 @@ -import { screen, within } from "@testing-library/react"; -import userEvent from "@testing-library/user-event"; import { afterEach, describe, expect, it, vi } from "vitest"; import { renderWithProviders } from "test-utils"; import { createRoutesStub } from "react-router"; -import { AxiosError } from "axios"; import { Sidebar } from "#/components/features/sidebar/sidebar"; import OpenHands from "#/api/open-hands"; @@ -21,161 +18,14 @@ const renderSidebar = () => renderWithProviders(); describe("Sidebar", () => { - describe("Settings", () => { - const getSettingsSpy = vi.spyOn(OpenHands, "getSettings"); - const saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); + const getSettingsSpy = vi.spyOn(OpenHands, "getSettings"); - afterEach(() => { - vi.clearAllMocks(); - }); - - it("should fetch settings data on mount", () => { - renderSidebar(); - expect(getSettingsSpy).toHaveBeenCalledOnce(); - }); - - it("should send all settings data when saving AI configuration", async () => { - const user = userEvent.setup(); - renderSidebar(); - - const settingsButton = screen.getByTestId("settings-button"); - await user.click(settingsButton); - - const settingsModal = screen.getByTestId("ai-config-modal"); - const saveButton = within(settingsModal).getByTestId( - "save-settings-button", - ); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - language: "en", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - }); - }); - - it("should not reset AI configuration when saving account settings", async () => { - const user = userEvent.setup(); - renderSidebar(); - - const userAvatar = screen.getByTestId("user-avatar"); - await user.click(userAvatar); - - const menu = screen.getByTestId("account-settings-context-menu"); - const accountSettingsButton = within(menu).getByTestId( - "account-settings-button", - ); - await user.click(accountSettingsButton); - - const accountSettingsModal = screen.getByTestId("account-settings-form"); - - const languageInput = - within(accountSettingsModal).getByLabelText(/language/i); - await user.click(languageInput); - - const norskOption = screen.getByText(/norsk/i); - await user.click(norskOption); - - const tokenInput = - within(accountSettingsModal).getByLabelText(/GITHUB\$TOKEN_LABEL/i); - await user.type(tokenInput, "new-token"); - - const analyticsConsentInput = - within(accountSettingsModal).getByTestId("analytics-consent"); - await user.click(analyticsConsentInput); - - const saveButton = - within(accountSettingsModal).getByTestId("save-settings"); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: 
"CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - github_token: "new-token", - language: "no", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - user_consents_to_analytics: true, - }); - }); - - it("should not send the api key if its SET", async () => { - const user = userEvent.setup(); - renderSidebar(); - - const settingsButton = screen.getByTestId("settings-button"); - await user.click(settingsButton); - - const settingsModal = screen.getByTestId("ai-config-modal"); - - // Click the advanced options switch to show the API key input - const advancedOptionsSwitch = within(settingsModal).getByTestId( - "advanced-option-switch", - ); - await user.click(advancedOptionsSwitch); - - const apiKeyInput = within(settingsModal).getByLabelText(/API\$KEY/i); - await user.type(apiKeyInput, "**********"); - - const saveButton = within(settingsModal).getByTestId( - "save-settings-button", - ); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - language: "en", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - }); - }); + afterEach(() => { + vi.clearAllMocks(); }); - describe("Settings Modal", () => { - it("should open the settings modal if the user clicks the settings button", async () => { - const user = userEvent.setup(); - renderSidebar(); - - expect(screen.queryByTestId("ai-config-modal")).not.toBeInTheDocument(); - - const settingsButton = screen.getByTestId("settings-button"); - await user.click(settingsButton); - - const settingsModal = screen.getByTestId("ai-config-modal"); - expect(settingsModal).toBeInTheDocument(); - }); - - it("should open the settings modal if GET /settings fails with a 404", async () => { - const error = new AxiosError( - "Request failed with status code 404", - "ERR_BAD_REQUEST", - undefined, - undefined, - { - status: 404, - statusText: "Not Found", - data: { message: "Settings not found" }, - headers: {}, - // @ts-expect-error - we only need the response object for this test - config: {}, - }, - ); - - vi.spyOn(OpenHands, "getSettings").mockRejectedValue(error); - - renderSidebar(); - - const settingsModal = await screen.findByTestId("ai-config-modal"); - expect(settingsModal).toBeInTheDocument(); - }); + it("should fetch settings data on mount", () => { + renderSidebar(); + expect(getSettingsSpy).toHaveBeenCalled(); }); }); diff --git a/frontend/__tests__/components/modals/settings/account-settings-modal.test.tsx b/frontend/__tests__/components/modals/settings/account-settings-modal.test.tsx deleted file mode 100644 index 2291b0af5d74..000000000000 --- a/frontend/__tests__/components/modals/settings/account-settings-modal.test.tsx +++ /dev/null @@ -1,174 +0,0 @@ -import { screen, waitFor } from "@testing-library/react"; -import { afterEach, describe, expect, it, vi } from "vitest"; -import userEvent from "@testing-library/user-event"; -import { renderWithProviders } from "test-utils"; -import { AccountSettingsModal } from "#/components/shared/modals/account-settings/account-settings-modal"; -import { MOCK_DEFAULT_USER_SETTINGS } from "#/mocks/handlers"; -import OpenHands from "#/api/open-hands"; -import * as ConsentHandlers from "#/utils/handle-capture-consent"; - -describe("AccountSettingsModal", () => { - const getSettingsSpy = vi.spyOn(OpenHands, "getSettings"); - const 
saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); - - afterEach(() => { - vi.clearAllMocks(); - }); - - it.skip("should set the appropriate user analytics consent default", async () => { - getSettingsSpy.mockResolvedValue({ - ...MOCK_DEFAULT_USER_SETTINGS, - user_consents_to_analytics: true, - }); - renderWithProviders( {}} />); - - const analyticsConsentInput = screen.getByTestId("analytics-consent"); - await waitFor(() => expect(analyticsConsentInput).toBeChecked()); - }); - - it("should save the users consent to analytics when saving account settings", async () => { - const user = userEvent.setup(); - renderWithProviders( {}} />); - - const analyticsConsentInput = screen.getByTestId("analytics-consent"); - await user.click(analyticsConsentInput); - - const saveButton = screen.getByTestId("save-settings"); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - language: "en", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - user_consents_to_analytics: true, - }); - }); - - it("should call handleCaptureConsent with the analytics consent value if the save is successful", async () => { - const user = userEvent.setup(); - const handleCaptureConsentSpy = vi.spyOn( - ConsentHandlers, - "handleCaptureConsent", - ); - renderWithProviders( {}} />); - - const analyticsConsentInput = screen.getByTestId("analytics-consent"); - await user.click(analyticsConsentInput); - - const saveButton = screen.getByTestId("save-settings"); - await user.click(saveButton); - - expect(handleCaptureConsentSpy).toHaveBeenCalledWith(true); - - await user.click(analyticsConsentInput); - await user.click(saveButton); - - expect(handleCaptureConsentSpy).toHaveBeenCalledWith(false); - }); - - it("should send all settings data when saving account settings", async () => { - const user = userEvent.setup(); - renderWithProviders( {}} />); - - const languageInput = screen.getByLabelText(/language/i); - await user.click(languageInput); - - const norskOption = screen.getByText(/norsk/i); - await user.click(norskOption); - - const tokenInput = screen.getByTestId("github-token-input"); - await user.type(tokenInput, "new-token"); - - const saveButton = screen.getByTestId("save-settings"); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - language: "no", - github_token: "new-token", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - user_consents_to_analytics: false, - }); - }); - - it("should render a checkmark and not the input if the github token is set", async () => { - getSettingsSpy.mockResolvedValue({ - ...MOCK_DEFAULT_USER_SETTINGS, - github_token_is_set: true, - }); - renderWithProviders( {}} />); - - await waitFor(() => { - const checkmark = screen.queryByTestId("github-token-set-checkmark"); - const input = screen.queryByTestId("github-token-input"); - - expect(checkmark).toBeInTheDocument(); - expect(input).not.toBeInTheDocument(); - }); - }); - - it("should send an unset github token property when pressing disconnect", async () => { - const user = userEvent.setup(); - getSettingsSpy.mockResolvedValue({ - ...MOCK_DEFAULT_USER_SETTINGS, - github_token_is_set: true, - }); - renderWithProviders( {}} />); - - 
const disconnectButton = await screen.findByTestId("disconnect-github"); - await user.click(disconnectButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - github_token: undefined, - language: "en", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - unset_github_token: true, - }); - }); - - it("should not unset the github token when changing the language", async () => { - const user = userEvent.setup(); - getSettingsSpy.mockResolvedValue({ - ...MOCK_DEFAULT_USER_SETTINGS, - github_token_is_set: true, - }); - renderWithProviders( {}} />); - - const languageInput = screen.getByLabelText(/language/i); - await user.click(languageInput); - - const norskOption = screen.getByText(/norsk/i); - await user.click(norskOption); - - const saveButton = screen.getByTestId("save-settings"); - await user.click(saveButton); - - expect(saveSettingsSpy).toHaveBeenCalledWith({ - agent: "CodeActAgent", - confirmation_mode: false, - enable_default_condenser: false, - language: "no", - llm_base_url: "", - llm_model: "anthropic/claude-3-5-sonnet-20241022", - remote_runtime_resource_factor: 1, - security_analyzer: "", - user_consents_to_analytics: false, - }); - }); -}); diff --git a/frontend/__tests__/components/modals/settings/brand-button.test.tsx b/frontend/__tests__/components/modals/settings/brand-button.test.tsx new file mode 100644 index 000000000000..784cecc62514 --- /dev/null +++ b/frontend/__tests__/components/modals/settings/brand-button.test.tsx @@ -0,0 +1,39 @@ +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { describe, expect, it, vi } from "vitest"; +import { BrandButton } from "#/components/features/settings/brand-button"; + +describe("BrandButton", () => { + const onClickMock = vi.fn(); + + it("should set a test id", () => { + render( + + Test Button + , + ); + + expect(screen.getByTestId("brand-button")).toBeInTheDocument(); + }); + + it("should call onClick when clicked", async () => { + const user = userEvent.setup(); + render( + + Test Button + , + ); + + await user.click(screen.getByText("Test Button")); + }); + + it("should be disabled if isDisabled is true", () => { + render( + + Test Button + , + ); + + expect(screen.getByText("Test Button")).toBeDisabled(); + }); +}); diff --git a/frontend/__tests__/components/modals/settings/model-selector.test.tsx b/frontend/__tests__/components/modals/settings/model-selector.test.tsx index 757f5dcd45ce..cacc6fad1053 100644 --- a/frontend/__tests__/components/modals/settings/model-selector.test.tsx +++ b/frontend/__tests__/components/modals/settings/model-selector.test.tsx @@ -2,7 +2,6 @@ import { describe, it, expect, vi } from "vitest"; import { render, screen } from "@testing-library/react"; import userEvent from "@testing-library/user-event"; import { ModelSelector } from "#/components/shared/modals/settings/model-selector"; -import { I18nKey } from "#/i18n/declaration"; // Mock react-i18next vi.mock("react-i18next", () => ({ diff --git a/frontend/__tests__/components/settings/settings-input.test.tsx b/frontend/__tests__/components/settings/settings-input.test.tsx new file mode 100644 index 000000000000..6009a2409e83 --- /dev/null +++ b/frontend/__tests__/components/settings/settings-input.test.tsx @@ -0,0 +1,88 @@ +import { render, screen } from "@testing-library/react"; +import { describe, expect, 
it } from "vitest"; +import { SettingsInput } from "#/components/features/settings/settings-input"; + +describe("SettingsInput", () => { + it("should render an optional tag if showOptionalTag is true", async () => { + const { rerender } = render( + , + ); + + expect(screen.queryByText(/optional/i)).not.toBeInTheDocument(); + + rerender( + , + ); + + expect(screen.getByText(/optional/i)).toBeInTheDocument(); + }); + + it("should disable the input if isDisabled is true", async () => { + const { rerender } = render( + , + ); + + expect(screen.getByTestId("test-input")).toBeEnabled(); + + rerender( + , + ); + + expect(screen.getByTestId("test-input")).toBeDisabled(); + }); + + it("should set a placeholder on the input", async () => { + render( + , + ); + + expect(screen.getByTestId("test-input")).toHaveAttribute( + "placeholder", + "Test Placeholder", + ); + }); + + it("should set a default value on the input", async () => { + render( + , + ); + + expect(screen.getByTestId("test-input")).toHaveValue("Test Value"); + }); + + it("should render start content", async () => { + const startContent =
<div>Start Content</div>
; + + render( + , + ); + + expect(screen.getByText("Start Content")).toBeInTheDocument(); + }); +}); diff --git a/frontend/__tests__/components/settings/settings-switch.test.tsx b/frontend/__tests__/components/settings/settings-switch.test.tsx new file mode 100644 index 000000000000..054bbc932823 --- /dev/null +++ b/frontend/__tests__/components/settings/settings-switch.test.tsx @@ -0,0 +1,64 @@ +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { describe, expect, it, vi } from "vitest"; +import { SettingsSwitch } from "#/components/features/settings/settings-switch"; + +describe("SettingsSwitch", () => { + it("should call the onChange handler when the input is clicked", async () => { + const user = userEvent.setup(); + const onToggleMock = vi.fn(); + render( + + Test Switch + , + ); + + const switchInput = screen.getByTestId("test-switch"); + + await user.click(switchInput); + expect(onToggleMock).toHaveBeenCalledWith(true); + + await user.click(switchInput); + expect(onToggleMock).toHaveBeenCalledWith(false); + }); + + it("should render a beta tag if isBeta is true", () => { + const { rerender } = render( + + Test Switch + , + ); + + expect(screen.queryByText(/beta/i)).not.toBeInTheDocument(); + + rerender( + + Test Switch + , + ); + + expect(screen.getByText(/beta/i)).toBeInTheDocument(); + }); + + it("should be able to set a default toggle state", async () => { + const user = userEvent.setup(); + const onToggleMock = vi.fn(); + render( + + Test Switch + , + ); + + expect(screen.getByTestId("test-switch")).toBeChecked(); + + const switchInput = screen.getByTestId("test-switch"); + await user.click(switchInput); + expect(onToggleMock).toHaveBeenCalledWith(false); + + expect(screen.getByTestId("test-switch")).not.toBeChecked(); + }); +}); diff --git a/frontend/__tests__/components/shared/modals/settings/settings-form.test.tsx b/frontend/__tests__/components/shared/modals/settings/settings-form.test.tsx index 06d1628e1f74..d1d623f137f5 100644 --- a/frontend/__tests__/components/shared/modals/settings/settings-form.test.tsx +++ b/frontend/__tests__/components/shared/modals/settings/settings-form.test.tsx @@ -1,36 +1,22 @@ -import { screen, fireEvent } from "@testing-library/react"; -import { describe, it, expect, vi, afterEach } from "vitest"; +import userEvent from "@testing-library/user-event"; +import { describe, expect, it, vi } from "vitest"; import { renderWithProviders } from "test-utils"; import { createRoutesStub } from "react-router"; -import userEvent from "@testing-library/user-event"; -import { DEFAULT_SETTINGS } from "#/services/settings"; -import { SettingsForm } from "#/components/shared/modals/settings/settings-form"; +import { screen } from "@testing-library/react"; import OpenHands from "#/api/open-hands"; +import { SettingsForm } from "#/components/shared/modals/settings/settings-form"; +import { DEFAULT_SETTINGS } from "#/services/settings"; describe("SettingsForm", () => { - const getConfigSpy = vi.spyOn(OpenHands, "getConfig"); - const saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); - const onCloseMock = vi.fn(); + const saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); - afterEach(() => { - vi.clearAllMocks(); - }); - - getConfigSpy.mockResolvedValue({ - APP_MODE: "saas", - GITHUB_CLIENT_ID: "123", - POSTHOG_CLIENT_KEY: "123", - }); - - const RouterStub = createRoutesStub([ + const RouteStub = createRoutesStub([ { Component: () => ( ), @@ -38,39 +24,17 @@ describe("SettingsForm", () => 
{ }, ]); - it("should not show runtime size selector by default", () => { - renderWithProviders(); - expect(screen.queryByText("Runtime Size")).not.toBeInTheDocument(); - }); - - it("should show runtime size selector when advanced options are enabled", async () => { - const user = userEvent.setup(); - renderWithProviders(); - - const toggleAdvancedMode = screen.getByTestId("advanced-option-switch"); - await user.click(toggleAdvancedMode); - - await screen.findByTestId("runtime-size"); - }); - - it("should not submit the form if required fields are empty", async () => { + it("should save the user settings and close the modal when the form is submitted", async () => { const user = userEvent.setup(); - renderWithProviders(); - - expect(screen.queryByTestId("custom-model-input")).not.toBeInTheDocument(); - - const toggleAdvancedMode = screen.getByTestId("advanced-option-switch"); - await user.click(toggleAdvancedMode); - - const customModelInput = screen.getByTestId("custom-model-input"); - expect(customModelInput).toBeInTheDocument(); - - await user.clear(customModelInput); + renderWithProviders(); - const saveButton = screen.getByTestId("save-settings-button"); + const saveButton = screen.getByRole("button", { name: /save/i }); await user.click(saveButton); - expect(saveSettingsSpy).not.toHaveBeenCalled(); - expect(onCloseMock).not.toHaveBeenCalled(); + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + llm_model: DEFAULT_SETTINGS.LLM_MODEL, + }), + ); }); }); diff --git a/frontend/__tests__/components/user-actions.test.tsx b/frontend/__tests__/components/user-actions.test.tsx index 143af7d7113f..3ce7e308d59c 100644 --- a/frontend/__tests__/components/user-actions.test.tsx +++ b/frontend/__tests__/components/user-actions.test.tsx @@ -14,24 +14,14 @@ describe("UserActions", () => { }); it("should render", () => { - render( - , - ); + render(); expect(screen.getByTestId("user-actions")).toBeInTheDocument(); expect(screen.getByTestId("user-avatar")).toBeInTheDocument(); }); it("should toggle the user menu when the user avatar is clicked", async () => { - render( - , - ); + render(); const userAvatar = screen.getByTestId("user-avatar"); await user.click(userAvatar); @@ -47,30 +37,9 @@ describe("UserActions", () => { ).not.toBeInTheDocument(); }); - it("should call onClickAccountSettings and close the menu when the account settings option is clicked", async () => { - render( - , - ); - - const userAvatar = screen.getByTestId("user-avatar"); - await user.click(userAvatar); - - const accountSettingsOption = screen.getByText("ACCOUNT_SETTINGS$SETTINGS"); - await user.click(accountSettingsOption); - - expect(onClickAccountSettingsMock).toHaveBeenCalledOnce(); - expect( - screen.queryByTestId("account-settings-context-menu"), - ).not.toBeInTheDocument(); - }); - it("should call onLogout and close the menu when the logout option is clicked", async () => { render( , @@ -89,12 +58,7 @@ describe("UserActions", () => { }); test("onLogout should not be called when the user is not logged in", async () => { - render( - , - ); + render(); const userAvatar = screen.getByTestId("user-avatar"); await user.click(userAvatar); @@ -104,21 +68,4 @@ describe("UserActions", () => { expect(onLogoutMock).not.toHaveBeenCalled(); }); - - // FIXME: Spinner now provided through useQuery - it.skip("should display the loading spinner", () => { - render( - , - ); - - const userAvatar = screen.getByTestId("user-avatar"); - user.click(userAvatar); - - 
expect(screen.getByTestId("loading-spinner")).toBeInTheDocument(); - expect(screen.queryByAltText("user avatar")).not.toBeInTheDocument(); - }); }); diff --git a/frontend/__tests__/hooks/mutation/use-save-settings.test.tsx b/frontend/__tests__/hooks/mutation/use-save-settings.test.tsx new file mode 100644 index 000000000000..2cf5dce1d47f --- /dev/null +++ b/frontend/__tests__/hooks/mutation/use-save-settings.test.tsx @@ -0,0 +1,36 @@ +import { renderHook, waitFor } from "@testing-library/react"; +import { describe, expect, it, vi } from "vitest"; +import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; +import OpenHands from "#/api/open-hands"; +import { useSaveSettings } from "#/hooks/mutation/use-save-settings"; + +describe("useSaveSettings", () => { + it("should send an empty string for llm_api_key if an empty string is passed, otherwise undefined", async () => { + const saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); + const { result } = renderHook(() => useSaveSettings(), { + wrapper: ({ children }) => ( + + {children} + + ), + }); + + result.current.mutate({ LLM_API_KEY: "" }); + await waitFor(() => { + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + llm_api_key: "", + }), + ); + }); + + result.current.mutate({ LLM_API_KEY: null }); + await waitFor(() => { + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + llm_api_key: undefined, + }), + ); + }); + }); +}); diff --git a/frontend/__tests__/i18n/translations.test.tsx b/frontend/__tests__/i18n/translations.test.tsx index 3833b4d306d1..01a0bebffe2a 100644 --- a/frontend/__tests__/i18n/translations.test.tsx +++ b/frontend/__tests__/i18n/translations.test.tsx @@ -1,20 +1,21 @@ -import { screen } from '@testing-library/react'; -import { describe, expect, it } from 'vitest'; -import i18n from '../../src/i18n'; -import { AccountSettingsContextMenu } from '../../src/components/features/context-menu/account-settings-context-menu'; -import { renderWithProviders } from '../../test-utils'; +import { screen } from "@testing-library/react"; +import { describe, expect, it } from "vitest"; +import i18n from "../../src/i18n"; +import { AccountSettingsContextMenu } from "../../src/components/features/context-menu/account-settings-context-menu"; +import { renderWithProviders } from "../../test-utils"; -describe('Translations', () => { - it('should render translated text', () => { - i18n.changeLanguage('en'); +describe("Translations", () => { + it("should render translated text", () => { + i18n.changeLanguage("en"); renderWithProviders( {}} onLogout={() => {}} onClose={() => {}} - isLoggedIn={true} - /> + isLoggedIn + />, ); - expect(screen.getByTestId('account-settings-context-menu')).toBeInTheDocument(); + expect( + screen.getByTestId("account-settings-context-menu"), + ).toBeInTheDocument(); }); }); diff --git a/frontend/__tests__/routes/home.test.tsx b/frontend/__tests__/routes/home.test.tsx new file mode 100644 index 000000000000..ec7a24761f60 --- /dev/null +++ b/frontend/__tests__/routes/home.test.tsx @@ -0,0 +1,114 @@ +import { createRoutesStub } from "react-router"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { renderWithProviders } from "test-utils"; +import userEvent from "@testing-library/user-event"; +import { screen } from "@testing-library/react"; +import { AxiosError } from "axios"; +import MainApp from "#/routes/_oh/route"; +import SettingsScreen from "#/routes/settings"; +import Home from "#/routes/_oh._index/route"; +import 
OpenHands from "#/api/open-hands"; + +const createAxiosNotFoundErrorObject = () => + new AxiosError( + "Request failed with status code 404", + "ERR_BAD_REQUEST", + undefined, + undefined, + { + status: 404, + statusText: "Not Found", + data: { message: "Settings not found" }, + headers: {}, + // @ts-expect-error - we only need the response object for this test + config: {}, + }, + ); + +describe("Home Screen", () => { + const getSettingsSpy = vi.spyOn(OpenHands, "getSettings"); + + const RouterStub = createRoutesStub([ + { + // layout route + Component: MainApp, + path: "/", + children: [ + { + // home route + Component: Home, + path: "/", + }, + ], + }, + { + Component: SettingsScreen, + path: "/settings", + }, + ]); + + afterEach(() => { + vi.clearAllMocks(); + }); + + it("should render the home screen", () => { + renderWithProviders(); + }); + + it("should navigate to the settings screen when the settings button is clicked", async () => { + const user = userEvent.setup(); + renderWithProviders(); + + const settingsButton = await screen.findByTestId("settings-button"); + await user.click(settingsButton); + + const settingsScreen = await screen.findByTestId("settings-screen"); + expect(settingsScreen).toBeInTheDocument(); + }); + + it("should navigate to the settings when pressing 'Connect to GitHub' if the user isn't authenticated", async () => { + const user = userEvent.setup(); + renderWithProviders(); + + const connectToGitHubButton = + await screen.findByTestId("connect-to-github"); + await user.click(connectToGitHubButton); + + const settingsScreen = await screen.findByTestId("settings-screen"); + expect(settingsScreen).toBeInTheDocument(); + }); + + describe("Settings 404", () => { + it("should open the settings modal if GET /settings fails with a 404", async () => { + const error = createAxiosNotFoundErrorObject(); + getSettingsSpy.mockRejectedValue(error); + + renderWithProviders(); + + const settingsModal = await screen.findByTestId("ai-config-modal"); + expect(settingsModal).toBeInTheDocument(); + }); + + it("should navigate to the settings screen when clicking the advanced settings button", async () => { + const error = createAxiosNotFoundErrorObject(); + getSettingsSpy.mockRejectedValue(error); + + const user = userEvent.setup(); + renderWithProviders(); + + const settingsModal = await screen.findByTestId("ai-config-modal"); + expect(settingsModal).toBeInTheDocument(); + + const advancedSettingsButton = await screen.findByTestId( + "advanced-settings-link", + ); + await user.click(advancedSettingsButton); + + const settingsModalAfter = screen.queryByTestId("ai-config-modal"); + expect(settingsModalAfter).not.toBeInTheDocument(); + + const settingsScreen = await screen.findByTestId("settings-screen"); + expect(settingsScreen).toBeInTheDocument(); + }); + }); +}); diff --git a/frontend/__tests__/routes/settings.test.tsx b/frontend/__tests__/routes/settings.test.tsx new file mode 100644 index 000000000000..2052d6eb9cbe --- /dev/null +++ b/frontend/__tests__/routes/settings.test.tsx @@ -0,0 +1,873 @@ +import { render, screen, waitFor, within } from "@testing-library/react"; +import { createRoutesStub } from "react-router"; +import { afterEach, describe, expect, it, test, vi } from "vitest"; +import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; +import userEvent, { UserEvent } from "@testing-library/user-event"; +import OpenHands from "#/api/open-hands"; +import { AuthProvider } from "#/context/auth-context"; +import SettingsScreen from "#/routes/settings"; 
+import * as AdvancedSettingsUtlls from "#/utils/has-advanced-settings-set"; +import { MOCK_DEFAULT_USER_SETTINGS } from "#/mocks/handlers"; +import { PostApiSettings } from "#/types/settings"; +import * as ConsentHandlers from "#/utils/handle-capture-consent"; + +const toggleAdvancedSettings = async (user: UserEvent) => { + const advancedSwitch = await screen.findByTestId("advanced-settings-switch"); + await user.click(advancedSwitch); +}; + +describe("Settings Screen", () => { + const getSettingsSpy = vi.spyOn(OpenHands, "getSettings"); + const saveSettingsSpy = vi.spyOn(OpenHands, "saveSettings"); + const getConfigSpy = vi.spyOn(OpenHands, "getConfig"); + + const { handleLogoutMock } = vi.hoisted(() => ({ + handleLogoutMock: vi.fn(), + })); + vi.mock("#/hooks/use-app-logout", () => ({ + useAppLogout: vi.fn().mockReturnValue({ handleLogout: handleLogoutMock }), + })); + + afterEach(() => { + vi.clearAllMocks(); + }); + + const RouterStub = createRoutesStub([ + { + Component: SettingsScreen, + path: "/settings", + }, + ]); + + const renderSettingsScreen = () => { + const queryClient = new QueryClient(); + return render(, { + wrapper: ({ children }) => ( + + + {children} + + + ), + }); + }; + + it("should render", async () => { + renderSettingsScreen(); + + await waitFor(() => { + screen.getByText("LLM Settings"); + screen.getByText("GitHub Settings"); + screen.getByText("Additional Settings"); + screen.getByText("Reset to defaults"); + screen.getByText("Save Changes"); + }); + }); + + describe("Account Settings", () => { + it("should render the account settings", async () => { + renderSettingsScreen(); + + await waitFor(() => { + screen.getByTestId("github-token-input"); + screen.getByTestId("github-token-help-anchor"); + screen.getByTestId("language-input"); + screen.getByTestId("enable-analytics-switch"); + }); + }); + + it("should render an indicator if the GitHub token is not set", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: false, + }); + + renderSettingsScreen(); + + await waitFor(() => { + const input = screen.getByTestId("github-token-input"); + const inputParent = input.parentElement; + + if (inputParent) { + const badge = within(inputParent).getByTestId("unset-indicator"); + expect(badge).toBeInTheDocument(); + } else { + throw new Error("GitHub token input parent not found"); + } + }); + }); + + it("should render an indicator if the GitHub token is set", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: true, + }); + + renderSettingsScreen(); + + const input = await screen.findByTestId("github-token-input"); + const inputParent = input.parentElement; + + if (inputParent) { + const badge = await within(inputParent).findByTestId("set-indicator"); + expect(badge).toBeInTheDocument(); + } else { + throw new Error("GitHub token input parent not found"); + } + }); + + it("should render a disabled 'Disconnect from GitHub' button if the GitHub token is not set", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: false, + }); + + renderSettingsScreen(); + + const button = await screen.findByText("Disconnect from GitHub"); + expect(button).toBeInTheDocument(); + expect(button).toBeDisabled(); + }); + + it("should render an enabled 'Disconnect from GitHub' button if the GitHub token is set", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: true, + }); + + 
renderSettingsScreen(); + const button = await screen.findByText("Disconnect from GitHub"); + expect(button).toBeInTheDocument(); + expect(button).toBeEnabled(); + + // input should still be rendered + const input = await screen.findByTestId("github-token-input"); + expect(input).toBeInTheDocument(); + }); + + it("should logout the user when the 'Disconnect from GitHub' button is clicked", async () => { + const user = userEvent.setup(); + + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: true, + }); + + renderSettingsScreen(); + + const button = await screen.findByText("Disconnect from GitHub"); + await user.click(button); + + expect(handleLogoutMock).toHaveBeenCalled(); + }); + + it("should not render the 'Configure GitHub Repositories' button if OSS mode", async () => { + getConfigSpy.mockResolvedValue({ + APP_MODE: "oss", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + renderSettingsScreen(); + + const button = screen.queryByText("Configure GitHub Repositories"); + expect(button).not.toBeInTheDocument(); + }); + + it("should render the 'Configure GitHub Repositories' button if SaaS mode and app slug exists", async () => { + getConfigSpy.mockResolvedValue({ + APP_MODE: "saas", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + APP_SLUG: "test-app", + }); + + renderSettingsScreen(); + await screen.findByText("Configure GitHub Repositories"); + }); + + it("should not render the GitHub token input if SaaS mode", async () => { + getConfigSpy.mockResolvedValue({ + APP_MODE: "saas", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + renderSettingsScreen(); + + await waitFor(() => { + const input = screen.queryByTestId("github-token-input"); + const helpAnchor = screen.queryByTestId("github-token-help-anchor"); + + expect(input).not.toBeInTheDocument(); + expect(helpAnchor).not.toBeInTheDocument(); + }); + }); + + it.skip("should not reset LLM Provider and Model if GitHub token is invalid", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + github_token_is_set: false, + llm_model: "anthropic/claude-3-5-sonnet-20241022", + }); + saveSettingsSpy.mockRejectedValueOnce(new Error("Invalid GitHub token")); + + renderSettingsScreen(); + + let llmProviderInput = await screen.findByTestId("llm-provider-input"); + let llmModelInput = await screen.findByTestId("llm-model-input"); + + expect(llmProviderInput).toHaveValue("Anthropic"); + expect(llmModelInput).toHaveValue("claude-3-5-sonnet-20241022"); + + const input = await screen.findByTestId("github-token-input"); + await user.type(input, "invalid-token"); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + llmProviderInput = await screen.findByTestId("llm-provider-input"); + llmModelInput = await screen.findByTestId("llm-model-input"); + + expect(llmProviderInput).toHaveValue("Anthropic"); + expect(llmModelInput).toHaveValue("claude-3-5-sonnet-20241022"); + }); + + test("enabling advanced, enabling confirmation mode, and then disabling + enabling advanced should not render the security analyzer input", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + + const confirmationModeSwitch = await screen.findByTestId( + "enable-confirmation-mode-switch", + ); + await user.click(confirmationModeSwitch); + + let securityAnalyzerInput = screen.queryByTestId( + "security-analyzer-input", + ); + 
expect(securityAnalyzerInput).toBeInTheDocument(); + + await toggleAdvancedSettings(user); + + securityAnalyzerInput = screen.queryByTestId("security-analyzer-input"); + expect(securityAnalyzerInput).not.toBeInTheDocument(); + + await toggleAdvancedSettings(user); + + securityAnalyzerInput = screen.queryByTestId("security-analyzer-input"); + expect(securityAnalyzerInput).not.toBeInTheDocument(); + }); + }); + + describe("LLM Settings", () => { + it("should render the basic LLM settings by default", async () => { + renderSettingsScreen(); + + await waitFor(() => { + screen.getByTestId("advanced-settings-switch"); + screen.getByTestId("llm-provider-input"); + screen.getByTestId("llm-model-input"); + screen.getByTestId("llm-api-key-input"); + screen.getByTestId("llm-api-key-help-anchor"); + }); + }); + + it("should render the advanced LLM settings if the advanced switch is toggled", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + // Should not render the advanced settings by default + expect( + screen.queryByTestId("llm-custom-model-input"), + ).not.toBeInTheDocument(); + expect(screen.queryByTestId("base-url-input")).not.toBeInTheDocument(); + expect(screen.queryByTestId("agent-input")).not.toBeInTheDocument(); + expect( + screen.queryByTestId("security-analyzer-input"), + ).not.toBeInTheDocument(); + expect( + screen.queryByTestId("enable-confirmation-mode-switch"), + ).not.toBeInTheDocument(); + + const advancedSwitch = await screen.findByTestId( + "advanced-settings-switch", + ); + await user.click(advancedSwitch); + + // Should render the advanced settings + expect( + screen.queryByTestId("llm-provider-input"), + ).not.toBeInTheDocument(); + expect(screen.queryByTestId("llm-model-input")).not.toBeInTheDocument(); + + screen.getByTestId("llm-custom-model-input"); + screen.getByTestId("base-url-input"); + screen.getByTestId("agent-input"); + + // "Invariant" security analyzer + screen.getByTestId("enable-confirmation-mode-switch"); + + // Not rendered until the switch is toggled + // screen.getByTestId("security-analyzer-input"); + }); + + it("should render an indicator if the LLM API key is not set", async () => { + getSettingsSpy.mockResolvedValueOnce({ + ...MOCK_DEFAULT_USER_SETTINGS, + llm_api_key: null, + }); + + renderSettingsScreen(); + + await waitFor(() => { + const input = screen.getByTestId("llm-api-key-input"); + const inputParent = input.parentElement; + + if (inputParent) { + const badge = within(inputParent).getByTestId("unset-indicator"); + expect(badge).toBeInTheDocument(); + } else { + throw new Error("LLM API Key input parent not found"); + } + }); + }); + + it("should render an indicator if the LLM API key is set", async () => { + getSettingsSpy.mockResolvedValueOnce({ + ...MOCK_DEFAULT_USER_SETTINGS, + llm_api_key: "**********", + }); + + renderSettingsScreen(); + + await waitFor(() => { + const input = screen.getByTestId("llm-api-key-input"); + const inputParent = input.parentElement; + + if (inputParent) { + const badge = within(inputParent).getByTestId("set-indicator"); + expect(badge).toBeInTheDocument(); + } else { + throw new Error("LLM API Key input parent not found"); + } + }); + }); + + it("should set asterik placeholder if the LLM API key is set", async () => { + getSettingsSpy.mockResolvedValueOnce({ + ...MOCK_DEFAULT_USER_SETTINGS, + llm_api_key: "**********", + }); + + renderSettingsScreen(); + + await waitFor(() => { + const input = screen.getByTestId("llm-api-key-input"); + expect(input).toHaveProperty("placeholder", 
"**********"); + }); + }); + + describe("Basic Model Selector", () => { + it("should set the provider and model", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + llm_model: "anthropic/claude-3-5-sonnet-20241022", + }); + + renderSettingsScreen(); + + await waitFor(() => { + const providerInput = screen.getByTestId("llm-provider-input"); + const modelInput = screen.getByTestId("llm-model-input"); + + expect(providerInput).toHaveValue("Anthropic"); + expect(modelInput).toHaveValue("claude-3-5-sonnet-20241022"); + }); + }); + + it.todo("should change the model values if the provider is changed"); + + it.todo("should clear the model values if the provider is cleared"); + }); + + describe("Advanced LLM Settings", () => { + it("should not render the runtime settings input if OSS mode", async () => { + const user = userEvent.setup(); + getConfigSpy.mockResolvedValue({ + APP_MODE: "oss", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + const input = screen.queryByTestId("runtime-settings-input"); + expect(input).not.toBeInTheDocument(); + }); + + it("should render the runtime settings input if SaaS mode", async () => { + const user = userEvent.setup(); + getConfigSpy.mockResolvedValue({ + APP_MODE: "saas", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + screen.getByTestId("runtime-settings-input"); + }); + + it("should set the default runtime setting set", async () => { + getConfigSpy.mockResolvedValue({ + APP_MODE: "saas", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + remote_runtime_resource_factor: 1, + }); + + renderSettingsScreen(); + + await toggleAdvancedSettings(userEvent.setup()); + + const input = await screen.findByTestId("runtime-settings-input"); + expect(input).toHaveValue("1x (2 core, 8G)"); + }); + + it("should save the runtime settings when the 'Save Changes' button is clicked", async () => { + const user = userEvent.setup(); + getConfigSpy.mockResolvedValue({ + APP_MODE: "saas", + GITHUB_CLIENT_ID: "123", + POSTHOG_CLIENT_KEY: "456", + }); + + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + }); + + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + + const input = await screen.findByTestId("runtime-settings-input"); + await user.click(input); + + const option = await screen.findByText("2x (4 core, 16G)"); + await user.click(option); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + remote_runtime_resource_factor: 2, + }), + ); + }); + + test("saving with no changes but having advanced enabled should hide the advanced items", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + await waitFor(() => { + expect( + screen.queryByTestId("llm-custom-model-input"), + ).not.toBeInTheDocument(); + expect( + screen.queryByTestId("base-url-input"), + ).not.toBeInTheDocument(); + expect(screen.queryByTestId("agent-input")).not.toBeInTheDocument(); + expect( + screen.queryByTestId("security-analyzer-input"), + ).not.toBeInTheDocument(); + expect( + 
screen.queryByTestId("enable-confirmation-mode-switch"), + ).not.toBeInTheDocument(); + }); + }); + + test("resetting settings with no changes but having advanced enabled should hide the advanced items", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + + const resetButton = screen.getByText("Reset to defaults"); + await user.click(resetButton); + + // show modal + const modal = await screen.findByTestId("reset-modal"); + expect(modal).toBeInTheDocument(); + + // confirm reset + const confirmButton = within(modal).getByText("Reset"); + await user.click(confirmButton); + + await waitFor(() => { + expect( + screen.queryByTestId("llm-custom-model-input"), + ).not.toBeInTheDocument(); + expect( + screen.queryByTestId("base-url-input"), + ).not.toBeInTheDocument(); + expect(screen.queryByTestId("agent-input")).not.toBeInTheDocument(); + expect( + screen.queryByTestId("security-analyzer-input"), + ).not.toBeInTheDocument(); + expect( + screen.queryByTestId("enable-confirmation-mode-switch"), + ).not.toBeInTheDocument(); + }); + }); + + it("should save if only confirmation mode is enabled", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + await toggleAdvancedSettings(user); + + const confirmationModeSwitch = await screen.findByTestId( + "enable-confirmation-mode-switch", + ); + await user.click(confirmationModeSwitch); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + confirmation_mode: true, + }), + ); + }); + }); + + it("should toggle advanced if user had set a custom model", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + llm_model: "some/custom-model", + }); + renderSettingsScreen(); + + await waitFor(() => { + const advancedSwitch = screen.getByTestId("advanced-settings-switch"); + expect(advancedSwitch).toBeChecked(); + + const llmCustomInput = screen.getByTestId("llm-custom-model-input"); + expect(llmCustomInput).toBeInTheDocument(); + expect(llmCustomInput).toHaveValue("some/custom-model"); + }); + }); + + it("should have advanced settings enabled if the user previously had them enabled", async () => { + const hasAdvancedSettingsSetSpy = vi.spyOn( + AdvancedSettingsUtlls, + "hasAdvancedSettingsSet", + ); + hasAdvancedSettingsSetSpy.mockReturnValue(true); + + renderSettingsScreen(); + + await waitFor(() => { + const advancedSwitch = screen.getByTestId("advanced-settings-switch"); + expect(advancedSwitch).toBeChecked(); + + const llmCustomInput = screen.getByTestId("llm-custom-model-input"); + expect(llmCustomInput).toBeInTheDocument(); + }); + }); + + it("should have confirmation mode enabled if the user previously had it enabled", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + confirmation_mode: true, + }); + + renderSettingsScreen(); + + await waitFor(() => { + const confirmationModeSwitch = screen.getByTestId( + "enable-confirmation-mode-switch", + ); + expect(confirmationModeSwitch).toBeChecked(); + }); + }); + + // FIXME: security analyzer is not found for some reason... 
+ it.skip("should have the values set if the user previously had them set", async () => { + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + language: "no", + github_token_is_set: true, + user_consents_to_analytics: true, + llm_base_url: "https://test.com", + llm_model: "anthropic/claude-3-5-sonnet-20241022", + agent: "CoActAgent", + security_analyzer: "mock-invariant", + }); + + renderSettingsScreen(); + + await waitFor(() => { + expect(screen.getByTestId("language-input")).toHaveValue("Norsk"); + expect(screen.getByText("Disconnect from GitHub")).toBeInTheDocument(); + expect(screen.getByTestId("enable-analytics-switch")).toBeChecked(); + expect(screen.getByTestId("advanced-settings-switch")).toBeChecked(); + expect(screen.getByTestId("base-url-input")).toHaveValue( + "https://test.com", + ); + expect(screen.getByTestId("llm-custom-model-input")).toHaveValue( + "anthropic/claude-3-5-sonnet-20241022", + ); + expect(screen.getByTestId("agent-input")).toHaveValue("CoActAgent"); + expect( + screen.getByTestId("enable-confirmation-mode-switch"), + ).toBeChecked(); + expect(screen.getByTestId("security-analyzer-input")).toHaveValue( + "mock-invariant", + ); + }); + }); + + it("should save the settings when the 'Save Changes' button is clicked", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + }); + + renderSettingsScreen(); + + const languageInput = await screen.findByTestId("language-input"); + await user.click(languageInput); + + const norskOption = await screen.findByText("Norsk"); + await user.click(norskOption); + + expect(languageInput).toHaveValue("Norsk"); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + llm_api_key: undefined, + github_token: undefined, + language: "no", + }), + ); + }); + + it("should properly save basic LLM model settings", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + }); + + renderSettingsScreen(); + + // disable advanced mode + const advancedSwitch = await screen.findByTestId( + "advanced-settings-switch", + ); + await user.click(advancedSwitch); + + const providerInput = await screen.findByTestId("llm-provider-input"); + await user.click(providerInput); + + const openaiOption = await screen.findByText("OpenAI"); + await user.click(openaiOption); + + const modelInput = await screen.findByTestId("llm-model-input"); + await user.click(modelInput); + + const gpt4Option = await screen.findByText("gpt-4o"); + await user.click(gpt4Option); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + github_token: undefined, + llm_api_key: undefined, + llm_model: "openai/gpt-4o", + }), + ); + }); + + it("should reset the settings when the 'Reset to defaults' button is clicked", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue(MOCK_DEFAULT_USER_SETTINGS); + + renderSettingsScreen(); + + const languageInput = await screen.findByTestId("language-input"); + await user.click(languageInput); + + const norskOption = await screen.findByText("Norsk"); + await user.click(norskOption); + + expect(languageInput).toHaveValue("Norsk"); + + const resetButton = screen.getByText("Reset to defaults"); + await user.click(resetButton); + + 
expect(saveSettingsSpy).not.toHaveBeenCalled(); + + // show modal + const modal = await screen.findByTestId("reset-modal"); + expect(modal).toBeInTheDocument(); + + // confirm reset + const confirmButton = within(modal).getByText("Reset"); + await user.click(confirmButton); + + const mockCopy: Partial = { + ...MOCK_DEFAULT_USER_SETTINGS, + }; + delete mockCopy.github_token_is_set; + delete mockCopy.unset_github_token; + delete mockCopy.user_consents_to_analytics; + + expect(saveSettingsSpy).toHaveBeenCalledWith({ + ...mockCopy, + github_token: undefined, // not set + llm_api_key: "", // reset as well + }); + expect(screen.queryByTestId("reset-modal")).not.toBeInTheDocument(); + }); + + it("should cancel the reset when the 'Cancel' button is clicked", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue(MOCK_DEFAULT_USER_SETTINGS); + + renderSettingsScreen(); + + const resetButton = await screen.findByText("Reset to defaults"); + await user.click(resetButton); + + const modal = await screen.findByTestId("reset-modal"); + expect(modal).toBeInTheDocument(); + + const cancelButton = within(modal).getByText("Cancel"); + await user.click(cancelButton); + + expect(saveSettingsSpy).not.toHaveBeenCalled(); + expect(screen.queryByTestId("reset-modal")).not.toBeInTheDocument(); + }); + + it("should call handleCaptureConsent with true if the save is successful", async () => { + const user = userEvent.setup(); + const handleCaptureConsentSpy = vi.spyOn( + ConsentHandlers, + "handleCaptureConsent", + ); + renderSettingsScreen(); + + const analyticsConsentInput = await screen.findByTestId( + "enable-analytics-switch", + ); + + expect(analyticsConsentInput).not.toBeChecked(); + await user.click(analyticsConsentInput); + expect(analyticsConsentInput).toBeChecked(); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(handleCaptureConsentSpy).toHaveBeenCalledWith(true); + }); + + it("should call handleCaptureConsent with false if the save is successful", async () => { + const user = userEvent.setup(); + const handleCaptureConsentSpy = vi.spyOn( + ConsentHandlers, + "handleCaptureConsent", + ); + renderSettingsScreen(); + + const saveButton = await screen.findByText("Save Changes"); + await user.click(saveButton); + + expect(handleCaptureConsentSpy).toHaveBeenCalledWith(false); + }); + + it("should not reset analytics consent when resetting to defaults", async () => { + const user = userEvent.setup(); + getSettingsSpy.mockResolvedValue({ + ...MOCK_DEFAULT_USER_SETTINGS, + user_consents_to_analytics: true, + }); + + renderSettingsScreen(); + + const analyticsConsentInput = await screen.findByTestId( + "enable-analytics-switch", + ); + expect(analyticsConsentInput).toBeChecked(); + + const resetButton = await screen.findByText("Reset to defaults"); + await user.click(resetButton); + + const modal = await screen.findByTestId("reset-modal"); + const confirmButton = within(modal).getByText("Reset"); + await user.click(confirmButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ user_consents_to_analytics: undefined }), + ); + }); + + it("should render the security analyzer input if the confirmation mode is enabled", async () => { + const user = userEvent.setup(); + renderSettingsScreen(); + + let securityAnalyzerInput = screen.queryByTestId( + "security-analyzer-input", + ); + expect(securityAnalyzerInput).not.toBeInTheDocument(); + + const confirmationModeSwitch = await screen.findByTestId( + 
"enable-confirmation-mode-switch", + ); + await user.click(confirmationModeSwitch); + + securityAnalyzerInput = await screen.findByTestId( + "security-analyzer-input", + ); + expect(securityAnalyzerInput).toBeInTheDocument(); + }); + + // FIXME: localStorage isn't being set + it.skip("should save with ENABLE_DEFAULT_CONDENSER with true if user set the feature flag in local storage", async () => { + localStorage.setItem("ENABLE_DEFAULT_CONDENSER", "true"); + + const user = userEvent.setup(); + renderSettingsScreen(); + + const saveButton = screen.getByText("Save Changes"); + await user.click(saveButton); + + expect(saveSettingsSpy).toHaveBeenCalledWith( + expect.objectContaining({ + enable_default_condenser: true, + }), + ); + }); + }); +}); diff --git a/frontend/__tests__/utils/has-advanced-settings-set.test.ts b/frontend/__tests__/utils/has-advanced-settings-set.test.ts new file mode 100644 index 000000000000..73568ccd98e2 --- /dev/null +++ b/frontend/__tests__/utils/has-advanced-settings-set.test.ts @@ -0,0 +1,56 @@ +import { describe, expect, it, test } from "vitest"; +import { hasAdvancedSettingsSet } from "#/utils/has-advanced-settings-set"; +import { DEFAULT_SETTINGS } from "#/services/settings"; + +describe("hasAdvancedSettingsSet", () => { + it("should return false by default", () => { + expect(hasAdvancedSettingsSet(DEFAULT_SETTINGS)).toBe(false); + }); + + describe("should be true if", () => { + test("LLM_BASE_URL is set", () => { + expect( + hasAdvancedSettingsSet({ + ...DEFAULT_SETTINGS, + LLM_BASE_URL: "test", + }), + ).toBe(true); + }); + + test("AGENT is not default value", () => { + expect( + hasAdvancedSettingsSet({ + ...DEFAULT_SETTINGS, + AGENT: "test", + }), + ).toBe(true); + }); + + test("REMOTE_RUNTIME_RESOURCE_FACTOR is not default value", () => { + expect( + hasAdvancedSettingsSet({ + ...DEFAULT_SETTINGS, + REMOTE_RUNTIME_RESOURCE_FACTOR: 999, + }), + ).toBe(true); + }); + + test("CONFIRMATION_MODE is true", () => { + expect( + hasAdvancedSettingsSet({ + ...DEFAULT_SETTINGS, + CONFIRMATION_MODE: true, + }), + ).toBe(true); + }); + + test("SECURITY_ANALYZER is set", () => { + expect( + hasAdvancedSettingsSet({ + ...DEFAULT_SETTINGS, + SECURITY_ANALYZER: "test", + }), + ).toBe(true); + }); + }); +}); diff --git a/frontend/__tests__/utils/is-custom-model.test.ts b/frontend/__tests__/utils/is-custom-model.test.ts new file mode 100644 index 000000000000..1da5667920ae --- /dev/null +++ b/frontend/__tests__/utils/is-custom-model.test.ts @@ -0,0 +1,20 @@ +import { describe, expect, it } from "vitest"; +import { isCustomModel } from "#/utils/is-custom-model"; + +describe("isCustomModel", () => { + const models = ["anthropic/claude-3.5", "openai/gpt-3.5-turbo", "gpt-4o"]; + + it("should return false by default", () => { + expect(isCustomModel(models, "")).toBe(false); + }); + + it("should be true if it is a custom model", () => { + expect(isCustomModel(models, "some/model")).toBe(true); + }); + + it("should be false if it is not a custom model", () => { + expect(isCustomModel(models, "anthropic/claude-3.5")).toBe(false); + expect(isCustomModel(models, "openai/gpt-3.5-turbo")).toBe(false); + expect(isCustomModel(models, "openai/gpt-4o")).toBe(false); + }); +}); diff --git a/frontend/src/api/open-hands.ts b/frontend/src/api/open-hands.ts index 0f8771a161b3..e77d7a5527c2 100644 --- a/frontend/src/api/open-hands.ts +++ b/frontend/src/api/open-hands.ts @@ -13,7 +13,7 @@ import { GetTrajectoryResponse, } from "./open-hands.types"; import { openHands } from "./open-hands-axios"; 
-import { ApiSettings } from "#/types/settings"; +import { ApiSettings, PostApiSettings } from "#/types/settings"; class OpenHands { /** @@ -267,7 +267,9 @@ class OpenHands { * Save the settings to the server. Only valid settings are saved. * @param settings - the settings to save */ - static async saveSettings(settings: Partial): Promise { + static async saveSettings( + settings: Partial, + ): Promise { const data = await openHands.post("/api/settings", settings); return data.status === 200; } diff --git a/frontend/src/components/features/analytics/analytics-consent-form-modal.tsx b/frontend/src/components/features/analytics/analytics-consent-form-modal.tsx index 21c0c04a0438..ea22445db07f 100644 --- a/frontend/src/components/features/analytics/analytics-consent-form-modal.tsx +++ b/frontend/src/components/features/analytics/analytics-consent-form-modal.tsx @@ -27,11 +27,10 @@ export function AnalyticsConsentFormModal({ { onSuccess: () => { handleCaptureConsent(analytics); + onClose(); }, }, ); - - onClose(); }; return ( diff --git a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx index 92a3359e83c9..8be19387f52f 100644 --- a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx +++ b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx @@ -1,19 +1,16 @@ import { useTranslation } from "react-i18next"; import { ContextMenu } from "./context-menu"; import { ContextMenuListItem } from "./context-menu-list-item"; -import { ContextMenuSeparator } from "./context-menu-separator"; import { useClickOutsideElement } from "#/hooks/use-click-outside-element"; import { I18nKey } from "#/i18n/declaration"; interface AccountSettingsContextMenuProps { - onClickAccountSettings: () => void; onLogout: () => void; onClose: () => void; isLoggedIn: boolean; } export function AccountSettingsContextMenu({ - onClickAccountSettings, onLogout, onClose, isLoggedIn, @@ -27,13 +24,6 @@ export function AccountSettingsContextMenu({ ref={ref} className="absolute left-full -top-1 z-10" > - - {t(I18nKey.ACCOUNT_SETTINGS$SETTINGS)} - - {t(I18nKey.ACCOUNT_SETTINGS$LOGOUT)} diff --git a/frontend/src/components/features/github/github-repositories-suggestion-box.tsx b/frontend/src/components/features/github/github-repositories-suggestion-box.tsx index 45eb5278068d..2450dd7b59e6 100644 --- a/frontend/src/components/features/github/github-repositories-suggestion-box.tsx +++ b/frontend/src/components/features/github/github-repositories-suggestion-box.tsx @@ -1,5 +1,6 @@ import React from "react"; import { useTranslation } from "react-i18next"; +import { useNavigate } from "react-router"; import { I18nKey } from "#/i18n/declaration"; import { SuggestionBox } from "#/components/features/suggestions/suggestion-box"; import GitHubLogo from "#/assets/branding/github-logo.svg?react"; @@ -10,7 +11,6 @@ import { useSearchRepositories } from "#/hooks/query/use-search-repositories"; import { useUserRepositories } from "#/hooks/query/use-user-repositories"; import { sanitizeQuery } from "#/utils/sanitize-query"; import { useDebounce } from "#/hooks/use-debounce"; -import { AccountSettingsModal } from "#/components/shared/modals/account-settings/account-settings-modal"; interface GitHubRepositoriesSuggestionBoxProps { handleSubmit: () => void; @@ -24,8 +24,7 @@ export function GitHubRepositoriesSuggestionBox({ user, }: GitHubRepositoriesSuggestionBoxProps) { const { t } = 
useTranslation(); - const [connectToGitHubModalOpen, setConnectToGitHubModalOpen] = - React.useState(false); + const navigate = useNavigate(); const [searchQuery, setSearchQuery] = React.useState(""); const debouncedSearchQuery = useDebounce(searchQuery, 300); @@ -45,39 +44,33 @@ export function GitHubRepositoriesSuggestionBox({ if (gitHubAuthUrl) { window.location.href = gitHubAuthUrl; } else { - setConnectToGitHubModalOpen(true); + navigate("/settings"); } }; const isLoggedIn = !!user; return ( - <> - - ) : ( - } - className="bg-[#791B80] w-full" - onClick={handleConnectToGitHub} - /> - ) - } - /> - {connectToGitHubModalOpen && ( - setConnectToGitHubModalOpen(false)} - /> - )} - + + ) : ( + } + className="bg-[#791B80] w-full" + onClick={handleConnectToGitHub} + /> + ) + } + /> ); } diff --git a/frontend/src/components/features/settings/brand-button.tsx b/frontend/src/components/features/settings/brand-button.tsx new file mode 100644 index 000000000000..b4d2dc24aa5f --- /dev/null +++ b/frontend/src/components/features/settings/brand-button.tsx @@ -0,0 +1,39 @@ +import { cn } from "#/utils/utils"; + +interface BrandButtonProps { + testId?: string; + variant: "primary" | "secondary"; + type: React.ButtonHTMLAttributes["type"]; + isDisabled?: boolean; + className?: string; + onClick?: () => void; +} + +export function BrandButton({ + testId, + children, + variant, + type, + isDisabled, + className, + onClick, +}: React.PropsWithChildren) { + return ( + + ); +} diff --git a/frontend/src/components/features/settings/help-link.tsx b/frontend/src/components/features/settings/help-link.tsx new file mode 100644 index 000000000000..984f279de230 --- /dev/null +++ b/frontend/src/components/features/settings/help-link.tsx @@ -0,0 +1,22 @@ +interface HelpLinkProps { + testId: string; + text: string; + linkText: string; + href: string; +} + +export function HelpLink({ testId, text, linkText, href }: HelpLinkProps) { + return ( +

+ {text}{" "} + + {linkText} + +

+ ); +} diff --git a/frontend/src/components/features/settings/key-status-icon.tsx b/frontend/src/components/features/settings/key-status-icon.tsx new file mode 100644 index 000000000000..ad4bd3cf8343 --- /dev/null +++ b/frontend/src/components/features/settings/key-status-icon.tsx @@ -0,0 +1,16 @@ +import SuccessIcon from "#/icons/success.svg?react"; +import { cn } from "#/utils/utils"; + +interface KeyStatusIconProps { + isSet: boolean; +} + +export function KeyStatusIcon({ isSet }: KeyStatusIconProps) { + return ( + + + + ); +} diff --git a/frontend/src/components/features/settings/optional-tag.tsx b/frontend/src/components/features/settings/optional-tag.tsx new file mode 100644 index 000000000000..3df207fc1b94 --- /dev/null +++ b/frontend/src/components/features/settings/optional-tag.tsx @@ -0,0 +1,3 @@ +export function OptionalTag() { + return (Optional); +} diff --git a/frontend/src/components/features/settings/settings-dropdown-input.tsx b/frontend/src/components/features/settings/settings-dropdown-input.tsx new file mode 100644 index 000000000000..69385bf08f48 --- /dev/null +++ b/frontend/src/components/features/settings/settings-dropdown-input.tsx @@ -0,0 +1,56 @@ +import { Autocomplete, AutocompleteItem } from "@heroui/react"; +import { OptionalTag } from "./optional-tag"; + +interface SettingsDropdownInputProps { + testId: string; + label: string; + name: string; + items: { key: React.Key; label: string }[]; + showOptionalTag?: boolean; + isDisabled?: boolean; + defaultSelectedKey?: string; + isClearable?: boolean; +} + +export function SettingsDropdownInput({ + testId, + label, + name, + items, + showOptionalTag, + isDisabled, + defaultSelectedKey, + isClearable, +}: SettingsDropdownInputProps) { + return ( + + ); +} diff --git a/frontend/src/components/features/settings/settings-input.tsx b/frontend/src/components/features/settings/settings-input.tsx new file mode 100644 index 000000000000..5362af09aa28 --- /dev/null +++ b/frontend/src/components/features/settings/settings-input.tsx @@ -0,0 +1,50 @@ +import { cn } from "#/utils/utils"; +import { OptionalTag } from "./optional-tag"; + +interface SettingsInputProps { + testId?: string; + name?: string; + label: string; + type: React.HTMLInputTypeAttribute; + defaultValue?: string; + placeholder?: string; + showOptionalTag?: boolean; + isDisabled?: boolean; + startContent?: React.ReactNode; + className?: string; +} + +export function SettingsInput({ + testId, + name, + label, + type, + defaultValue, + placeholder, + showOptionalTag, + isDisabled, + startContent, + className, +}: SettingsInputProps) { + return ( + + ); +} diff --git a/frontend/src/components/features/settings/settings-switch.tsx b/frontend/src/components/features/settings/settings-switch.tsx new file mode 100644 index 000000000000..d1bfaff94935 --- /dev/null +++ b/frontend/src/components/features/settings/settings-switch.tsx @@ -0,0 +1,50 @@ +import React from "react"; +import { StyledSwitchComponent } from "./styled-switch-component"; + +interface SettingsSwitchProps { + testId?: string; + name?: string; + onToggle?: (value: boolean) => void; + defaultIsToggled?: boolean; + isBeta?: boolean; +} + +export function SettingsSwitch({ + children, + testId, + name, + onToggle, + defaultIsToggled, + isBeta, +}: React.PropsWithChildren) { + const [isToggled, setIsToggled] = React.useState(defaultIsToggled ?? 
false); + + const handleToggle = (value: boolean) => { + setIsToggled(value); + onToggle?.(value); + }; + + return ( + + ); +} diff --git a/frontend/src/components/features/settings/styled-switch-component.tsx b/frontend/src/components/features/settings/styled-switch-component.tsx new file mode 100644 index 000000000000..36d9ffda6bfb --- /dev/null +++ b/frontend/src/components/features/settings/styled-switch-component.tsx @@ -0,0 +1,26 @@ +import { cn } from "#/utils/utils"; + +interface StyledSwitchComponentProps { + isToggled: boolean; +} + +export function StyledSwitchComponent({ + isToggled, +}: StyledSwitchComponentProps) { + return ( +
+
+
+ ); +} diff --git a/frontend/src/components/features/sidebar/sidebar.tsx b/frontend/src/components/features/sidebar/sidebar.tsx index 777878a1821e..645543ac6fd2 100644 --- a/frontend/src/components/features/sidebar/sidebar.tsx +++ b/frontend/src/components/features/sidebar/sidebar.tsx @@ -3,6 +3,7 @@ import { FaListUl } from "react-icons/fa"; import { useDispatch } from "react-redux"; import posthog from "posthog-js"; import toast from "react-hot-toast"; +import { NavLink } from "react-router"; import { useGitHubUser } from "#/hooks/query/use-github-user"; import { UserActions } from "./user-actions"; import { AllHandsLogoButton } from "#/components/shared/buttons/all-hands-logo-button"; @@ -10,7 +11,6 @@ import { DocsButton } from "#/components/shared/buttons/docs-button"; import { ExitProjectButton } from "#/components/shared/buttons/exit-project-button"; import { SettingsButton } from "#/components/shared/buttons/settings-button"; import { LoadingSpinner } from "#/components/shared/loading-spinner"; -import { AccountSettingsModal } from "#/components/shared/modals/account-settings/account-settings-modal"; import { SettingsModal } from "#/components/shared/modals/settings/settings-modal"; import { useCurrentSettings } from "#/context/settings-context"; import { useSettings } from "#/hooks/query/use-settings"; @@ -30,28 +30,18 @@ export function Sidebar() { const user = useGitHubUser(); const { data: config } = useConfig(); const { - data: settings, error: settingsError, isError: settingsIsError, isFetching: isFetchingSettings, } = useSettings(); const { mutateAsync: logout } = useLogout(); - const { saveUserSettings } = useCurrentSettings(); + const { settings, saveUserSettings } = useCurrentSettings(); - const [accountSettingsModalOpen, setAccountSettingsModalOpen] = - React.useState(false); const [settingsModalIsOpen, setSettingsModalIsOpen] = React.useState(false); const [conversationPanelIsOpen, setConversationPanelIsOpen] = React.useState(false); - React.useEffect(() => { - // If the github token is invalid, open the account settings modal again - if (user.isError) { - setAccountSettingsModalOpen(true); - } - }, [user.isError]); - React.useEffect(() => { // We don't show toast errors for settings in the global error handler // because we have a special case for 404 errors @@ -63,6 +53,8 @@ export function Sidebar() { toast.error( "Something went wrong while fetching settings. 
Please reload the page.", ); + } else if (settingsError?.status === 404) { + setSettingsModalIsOpen(true); } }, [settingsError?.status, settingsError, isFetchingSettings]); @@ -71,10 +63,6 @@ export function Sidebar() { endSession(); }; - const handleAccountSettingsModalClose = () => { - setAccountSettingsModalOpen(false); - }; - const handleLogout = async () => { if (config?.APP_MODE === "saas") await logout(); else await saveUserSettings({ unset_github_token: true }); @@ -84,33 +72,44 @@ export function Sidebar() { return ( <> - {accountSettingsModalOpen && ( - - )} - {(settingsError?.status === 404 || settingsModalIsOpen) && ( + {settingsModalIsOpen && ( setSettingsModalIsOpen(false)} diff --git a/frontend/src/components/features/sidebar/user-actions.tsx b/frontend/src/components/features/sidebar/user-actions.tsx index 359c8fbbb314..0ac8dc95f532 100644 --- a/frontend/src/components/features/sidebar/user-actions.tsx +++ b/frontend/src/components/features/sidebar/user-actions.tsx @@ -3,16 +3,11 @@ import { UserAvatar } from "./user-avatar"; import { AccountSettingsContextMenu } from "../context-menu/account-settings-context-menu"; interface UserActionsProps { - onClickAccountSettings: () => void; onLogout: () => void; user?: { avatar_url: string }; } -export function UserActions({ - onClickAccountSettings, - onLogout, - user, -}: UserActionsProps) { +export function UserActions({ onLogout, user }: UserActionsProps) { const [accountContextMenuIsVisible, setAccountContextMenuIsVisible] = React.useState(false); @@ -24,11 +19,6 @@ export function UserActions({ setAccountContextMenuIsVisible(false); }; - const handleClickAccountSettings = () => { - onClickAccountSettings(); - closeAccountMenu(); - }; - const handleLogout = () => { onLogout(); closeAccountMenu(); @@ -41,7 +31,6 @@ export function UserActions({ {accountContextMenuIsVisible && ( diff --git a/frontend/src/components/features/sidebar/user-avatar.tsx b/frontend/src/components/features/sidebar/user-avatar.tsx index 3857f8f52d4b..3e5d0fda57fb 100644 --- a/frontend/src/components/features/sidebar/user-avatar.tsx +++ b/frontend/src/components/features/sidebar/user-avatar.tsx @@ -1,7 +1,7 @@ import { useTranslation } from "react-i18next"; import { I18nKey } from "#/i18n/declaration"; import { LoadingSpinner } from "#/components/shared/loading-spinner"; -import DefaultUserAvatar from "#/icons/default-user.svg?react"; +import ProfileIcon from "#/icons/profile.svg?react"; import { cn } from "#/utils/utils"; import { Avatar } from "./avatar"; import { TooltipButton } from "#/components/shared/buttons/tooltip-button"; @@ -21,16 +21,17 @@ export function UserAvatar({ onClick, avatarUrl, isLoading }: UserAvatarProps) { ariaLabel={t(I18nKey.USER$ACCOUNT_SETTINGS)} onClick={onClick} className={cn( - "w-8 h-8 rounded-full flex items-center justify-center border-2 border-gray-200", + "w-8 h-8 rounded-full flex items-center justify-center", isLoading && "bg-transparent", )} > {!isLoading && avatarUrl && } {!isLoading && !avatarUrl && ( - )} {isLoading && } diff --git a/frontend/src/components/shared/buttons/all-hands-logo-button.tsx b/frontend/src/components/shared/buttons/all-hands-logo-button.tsx index f8af9ac47651..e068f152af13 100644 --- a/frontend/src/components/shared/buttons/all-hands-logo-button.tsx +++ b/frontend/src/components/shared/buttons/all-hands-logo-button.tsx @@ -12,7 +12,7 @@ export function AllHandsLogoButton({ onClick }: AllHandsLogoButtonProps) { ariaLabel="All Hands Logo" onClick={onClick} > - + ); } diff --git 
a/frontend/src/components/shared/buttons/docs-button.tsx b/frontend/src/components/shared/buttons/docs-button.tsx index d2cafe2422cb..4c2b248e0b8e 100644 --- a/frontend/src/components/shared/buttons/docs-button.tsx +++ b/frontend/src/components/shared/buttons/docs-button.tsx @@ -1,5 +1,5 @@ import { useTranslation } from "react-i18next"; -import DocsIcon from "#/icons/docs.svg?react"; +import DocsIcon from "#/icons/academy.svg?react"; import { I18nKey } from "#/i18n/declaration"; import { TooltipButton } from "./tooltip-button"; @@ -11,7 +11,7 @@ export function DocsButton() { ariaLabel={t(I18nKey.SIDEBAR$DOCS)} href="https://docs.all-hands.dev" > - + ); } diff --git a/frontend/src/components/shared/buttons/exit-project-button.tsx b/frontend/src/components/shared/buttons/exit-project-button.tsx index 5a6db074f616..eaef96680ae7 100644 --- a/frontend/src/components/shared/buttons/exit-project-button.tsx +++ b/frontend/src/components/shared/buttons/exit-project-button.tsx @@ -1,6 +1,6 @@ import { useTranslation } from "react-i18next"; import { I18nKey } from "#/i18n/declaration"; -import NewProjectIcon from "#/icons/new-project.svg?react"; +import PlusIcon from "#/icons/plus.svg?react"; import { TooltipButton } from "./tooltip-button"; interface ExitProjectButtonProps { @@ -17,7 +17,7 @@ export function ExitProjectButton({ onClick }: ExitProjectButtonProps) { onClick={onClick} testId="new-project-button" > - + ); } diff --git a/frontend/src/components/shared/buttons/settings-button.tsx b/frontend/src/components/shared/buttons/settings-button.tsx index 2b792e5ed4c4..80bd5eee8a24 100644 --- a/frontend/src/components/shared/buttons/settings-button.tsx +++ b/frontend/src/components/shared/buttons/settings-button.tsx @@ -1,14 +1,15 @@ -import { FaCog } from "react-icons/fa"; import { useTranslation } from "react-i18next"; +import SettingsIcon from "#/icons/settings.svg?react"; import { TooltipButton } from "./tooltip-button"; import { I18nKey } from "#/i18n/declaration"; interface SettingsButtonProps { - onClick: () => void; + onClick?: () => void; } export function SettingsButton({ onClick }: SettingsButtonProps) { const { t } = useTranslation(); + return ( - + ); } diff --git a/frontend/src/components/shared/modals/account-settings/account-settings-form.tsx b/frontend/src/components/shared/modals/account-settings/account-settings-form.tsx deleted file mode 100644 index cbce86d6ee0c..000000000000 --- a/frontend/src/components/shared/modals/account-settings/account-settings-form.tsx +++ /dev/null @@ -1,160 +0,0 @@ -import React from "react"; -import { useTranslation } from "react-i18next"; -import posthog from "posthog-js"; -import { - BaseModalDescription, - BaseModalTitle, -} from "../confirmation-modals/base-modal"; -import { ModalBody } from "../modal-body"; -import { AvailableLanguages } from "#/i18n"; -import { I18nKey } from "#/i18n/declaration"; -import { handleCaptureConsent } from "#/utils/handle-capture-consent"; -import { ModalButton } from "../../buttons/modal-button"; -import { FormFieldset } from "../../form-fieldset"; -import { useConfig } from "#/hooks/query/use-config"; -import { useCurrentSettings } from "#/context/settings-context"; -import { GitHubTokenInput } from "./github-token-input"; -import { PostSettings } from "#/types/settings"; -import { useGitHubUser } from "#/hooks/query/use-github-user"; - -interface AccountSettingsFormProps { - onClose: () => void; -} - -export function AccountSettingsForm({ onClose }: AccountSettingsFormProps) { - const { isError: isGitHubError 
} = useGitHubUser(); - const { data: config } = useConfig(); - const { saveUserSettings, settings } = useCurrentSettings(); - const { t } = useTranslation(); - - const githubTokenIsSet = !!settings?.GITHUB_TOKEN_IS_SET; - const analyticsConsentValue = !!settings?.USER_CONSENTS_TO_ANALYTICS; - const selectedLanguage = settings?.LANGUAGE || "en"; - - const handleSubmit = async (event: React.FormEvent) => { - event.preventDefault(); - const formData = new FormData(event.currentTarget); - - const ghToken = formData.get("ghToken")?.toString(); - const language = formData.get("language")?.toString(); - const analytics = formData.get("analytics")?.toString() === "on"; - - const newSettings: Partial = {}; - newSettings.user_consents_to_analytics = analytics; - - if (ghToken) newSettings.github_token = ghToken; - - // The form returns the language label, so we need to find the corresponding - // language key to save it in the settings - if (language) { - const languageKey = AvailableLanguages.find( - ({ label }) => label === language, - )?.value; - - if (languageKey) newSettings.LANGUAGE = languageKey; - } - - await saveUserSettings(newSettings, { - onSuccess: () => { - handleCaptureConsent(analytics); - }, - }); - - onClose(); - }; - - const onDisconnect = async () => { - await saveUserSettings({ unset_github_token: true }); - posthog.reset(); - onClose(); - }; - - return ( - -
-
- - - {config?.APP_MODE === "saas" && config?.APP_SLUG && ( - - {t(I18nKey.GITHUB$CONFIGURE_REPOS)} - - )} - ({ - key, - value: label, - }))} - /> - - {config?.APP_MODE !== "saas" && ( - <> - - {!githubTokenIsSet && ( - - {t(I18nKey.GITHUB$GET_TOKEN)}{" "} - - {t(I18nKey.COMMON$HERE)} - - - )} - - )} - {isGitHubError && ( -

- {t(I18nKey.GITHUB$TOKEN_INVALID)} -

- )} - {githubTokenIsSet && !isGitHubError && ( - - )} -
- - - -
- - -
-
-
- ); -} diff --git a/frontend/src/components/shared/modals/account-settings/account-settings-modal.tsx b/frontend/src/components/shared/modals/account-settings/account-settings-modal.tsx deleted file mode 100644 index 49a0d54f3ef8..000000000000 --- a/frontend/src/components/shared/modals/account-settings/account-settings-modal.tsx +++ /dev/null @@ -1,14 +0,0 @@ -import { ModalBackdrop } from "../modal-backdrop"; -import { AccountSettingsForm } from "./account-settings-form"; - -interface AccountSettingsModalProps { - onClose: () => void; -} - -export function AccountSettingsModal({ onClose }: AccountSettingsModalProps) { - return ( - - - - ); -} diff --git a/frontend/src/components/shared/modals/account-settings/github-token-input.tsx b/frontend/src/components/shared/modals/account-settings/github-token-input.tsx deleted file mode 100644 index f5f4de4b226e..000000000000 --- a/frontend/src/components/shared/modals/account-settings/github-token-input.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import { useTranslation } from "react-i18next"; -import { FaCheckCircle } from "react-icons/fa"; -import { I18nKey } from "#/i18n/declaration"; - -interface GitHubTokenInputProps { - githubTokenIsSet: boolean; -} - -export function GitHubTokenInput({ githubTokenIsSet }: GitHubTokenInputProps) { - const { t } = useTranslation(); - - return ( - - ); -} diff --git a/frontend/src/components/shared/modals/settings/model-selector.tsx b/frontend/src/components/shared/modals/settings/model-selector.tsx index c169d67870b7..811113844132 100644 --- a/frontend/src/components/shared/modals/settings/model-selector.tsx +++ b/frontend/src/components/shared/modals/settings/model-selector.tsx @@ -65,107 +65,109 @@ export function ModelSelector({ const { t } = useTranslation(); return ( -
-
-
- - { - if (e?.toString()) handleChangeProvider(e.toString()); - }} - onInputChange={(value) => !value && clear()} - defaultSelectedKey={selectedProvider ?? undefined} - selectedKey={selectedProvider} - inputProps={{ - classNames: { - inputWrapper: "bg-[#27272A] rounded-md text-sm px-3 py-[10px]", - }, - }} - > - - {Object.keys(models) - .filter((provider) => VERIFIED_PROVIDERS.includes(provider)) - .map((provider) => ( - - {mapProvider(provider)} - - ))} - - - {Object.keys(models) - .filter((provider) => !VERIFIED_PROVIDERS.includes(provider)) - .map((provider) => ( - - {mapProvider(provider)} - - ))} - - -
+
+
+ + { + if (e?.toString()) handleChangeProvider(e.toString()); + }} + onInputChange={(value) => !value && clear()} + defaultSelectedKey={selectedProvider ?? undefined} + selectedKey={selectedProvider} + classNames={{ + popoverContent: "bg-[#454545] rounded-xl border border-[#717888]", + }} + inputProps={{ + classNames: { + inputWrapper: + "bg-[#454545] border border-[#717888] h-10 w-full rounded p-2 placeholder:italic", + }, + }} + > + + {Object.keys(models) + .filter((provider) => VERIFIED_PROVIDERS.includes(provider)) + .map((provider) => ( + + {mapProvider(provider)} + + ))} + + + {Object.keys(models) + .filter((provider) => !VERIFIED_PROVIDERS.includes(provider)) + .map((provider) => ( + + {mapProvider(provider)} + + ))} + + +
-
- - { - if (e?.toString()) handleChangeModel(e.toString()); - }} - isDisabled={isDisabled || !selectedProvider} - selectedKey={selectedModel} - defaultSelectedKey={selectedModel ?? undefined} - inputProps={{ - classNames: { - inputWrapper: "bg-[#27272A] rounded-md text-sm px-3 py-[10px]", - }, - }} - > - - {models[selectedProvider || ""]?.models - .filter((model) => VERIFIED_MODELS.includes(model)) - .map((model) => ( - - {model} - - ))} - - - {models[selectedProvider || ""]?.models - .filter((model) => !VERIFIED_MODELS.includes(model)) - .map((model) => ( - - {model} - - ))} - - -
-
+
+ + { + if (e?.toString()) handleChangeModel(e.toString()); + }} + isDisabled={isDisabled || !selectedProvider} + selectedKey={selectedModel} + defaultSelectedKey={selectedModel ?? undefined} + classNames={{ + popoverContent: "bg-[#454545] rounded-xl border border-[#717888]", + }} + inputProps={{ + classNames: { + inputWrapper: + "bg-[#454545] border border-[#717888] h-10 w-full rounded p-2 placeholder:italic", + }, + }} + > + + {models[selectedProvider || ""]?.models + .filter((model) => VERIFIED_MODELS.includes(model)) + .map((model) => ( + + {model} + + ))} + + + {models[selectedProvider || ""]?.models + .filter((model) => !VERIFIED_MODELS.includes(model)) + .map((model) => ( + + {model} + + ))} + + +
); } diff --git a/frontend/src/components/shared/modals/settings/settings-form.tsx b/frontend/src/components/shared/modals/settings/settings-form.tsx index 6ed859f47dbc..0c7ef45079ee 100644 --- a/frontend/src/components/shared/modals/settings/settings-form.tsx +++ b/frontend/src/components/shared/modals/settings/settings-form.tsx @@ -4,86 +4,34 @@ import React from "react"; import posthog from "posthog-js"; import { I18nKey } from "#/i18n/declaration"; import { organizeModelsAndProviders } from "#/utils/organize-models-and-providers"; -import { getDefaultSettings } from "#/services/settings"; -import { extractModelAndProvider } from "#/utils/extract-model-and-provider"; import { DangerModal } from "../confirmation-modals/danger-modal"; import { extractSettings } from "#/utils/settings-utils"; import { useEndSession } from "#/hooks/use-end-session"; -import { ModalButton } from "../../buttons/modal-button"; -import { AdvancedOptionSwitch } from "../../inputs/advanced-option-switch"; -import { AgentInput } from "../../inputs/agent-input"; -import { APIKeyInput } from "../../inputs/api-key-input"; -import { BaseUrlInput } from "../../inputs/base-url-input"; -import { ConfirmationModeSwitch } from "../../inputs/confirmation-mode-switch"; -import { CustomModelInput } from "../../inputs/custom-model-input"; -import { SecurityAnalyzerInput } from "../../inputs/security-analyzers-input"; import { ModalBackdrop } from "../modal-backdrop"; import { ModelSelector } from "./model-selector"; - -import { RuntimeSizeSelector } from "./runtime-size-selector"; -import { useConfig } from "#/hooks/query/use-config"; import { useCurrentSettings } from "#/context/settings-context"; import { MEMORY_CONDENSER } from "#/utils/feature-flags"; import { Settings } from "#/types/settings"; +import { BrandButton } from "#/components/features/settings/brand-button"; +import { KeyStatusIcon } from "#/components/features/settings/key-status-icon"; +import { SettingsInput } from "#/components/features/settings/settings-input"; +import { HelpLink } from "#/components/features/settings/help-link"; interface SettingsFormProps { - disabled?: boolean; settings: Settings; models: string[]; - agents: string[]; - securityAnalyzers: string[]; onClose: () => void; } -export function SettingsForm({ - disabled, - settings, - models, - agents, - securityAnalyzers, - onClose, -}: SettingsFormProps) { +export function SettingsForm({ settings, models, onClose }: SettingsFormProps) { const { saveUserSettings } = useCurrentSettings(); const endSession = useEndSession(); - const { data: config } = useConfig(); const location = useLocation(); const { t } = useTranslation(); const formRef = React.useRef(null); - const advancedAlreadyInUse = React.useMemo(() => { - if (models.length > 0) { - const organizedModels = organizeModelsAndProviders(models); - const { provider, model } = extractModelAndProvider( - settings.LLM_MODEL || "", - ); - const isKnownModel = - provider in organizedModels && - organizedModels[provider].models.includes(model); - - const isUsingSecurityAnalyzer = !!settings.SECURITY_ANALYZER; - const isUsingConfirmationMode = !!settings.CONFIRMATION_MODE; - const isUsingBaseUrl = !!settings.LLM_BASE_URL; - const isUsingCustomModel = !!settings.LLM_MODEL && !isKnownModel; - const isUsingDefaultCondenser = !!settings.ENABLE_DEFAULT_CONDENSER; - - return ( - isUsingSecurityAnalyzer || - isUsingConfirmationMode || - isUsingBaseUrl || - isUsingCustomModel || - isUsingDefaultCondenser - ); - } - - return false; - }, [settings, 
models]); - - const [showAdvancedOptions, setShowAdvancedOptions] = - React.useState(advancedAlreadyInUse); - const [confirmResetDefaultsModalOpen, setConfirmResetDefaultsModalOpen] = - React.useState(false); const [confirmEndSessionModalOpen, setConfirmEndSessionModalOpen] = React.useState(false); @@ -111,13 +59,6 @@ export function SettingsForm({ }); }; - const handleConfirmResetSettings = async () => { - await saveUserSettings(getDefaultSettings()); - onClose(); - resetOngoingSession(); - posthog.capture("settings_reset"); - }; - const handleConfirmEndSession = () => { const formData = new FormData(formRef.current ?? undefined); handleFormSubmission(formData); @@ -134,7 +75,7 @@ export function SettingsForm({ } }; - const isSaasMode = config?.APP_MODE === "saas"; + const isLLMKeySet = settings.LLM_API_KEY !== "**********"; return (
@@ -144,115 +85,41 @@ export function SettingsForm({ className="flex flex-col gap-6" onSubmit={handleSubmit} > -
- + - {showAdvancedOptions && ( - <> - - - - - )} - - {!showAdvancedOptions && ( - - )} - - } /> - {showAdvancedOptions && ( - <> - - - {isSaasMode && ( - - )} - - - - - - )} +
-
- - -
- { - setConfirmResetDefaultsModalOpen(true); - }} - /> + + {t(I18nKey.BUTTON$SAVE)} +
- {confirmResetDefaultsModalOpen && ( - - setConfirmResetDefaultsModalOpen(false), - }, - }} - /> - - )} {confirmEndSessionModalOpen && (
{aiConfigOptions.error && (

{aiConfigOptions.error.message}

)} - + {t(I18nKey.AI_SETTINGS$TITLE)}

- {t(I18nKey.SETTINGS$DESCRIPTION)} + {t(I18nKey.SETTINGS$DESCRIPTION)} For other options,{" "} + + see advanced settings +

-

{t(I18nKey.SETTINGS$WARNING)}

{aiConfigOptions.isLoading && (
@@ -41,8 +48,6 @@ export function SettingsModal({ onClose, settings }: SettingsModalProps) { )} diff --git a/frontend/src/context/settings-context.tsx b/frontend/src/context/settings-context.tsx index c3abbe3184ee..ac05ea92bd86 100644 --- a/frontend/src/context/settings-context.tsx +++ b/frontend/src/context/settings-context.tsx @@ -1,8 +1,10 @@ import React from "react"; import { MutateOptions } from "@tanstack/react-query"; +import toast from "react-hot-toast"; import { useSettings } from "#/hooks/query/use-settings"; import { useSaveSettings } from "#/hooks/mutation/use-save-settings"; import { PostSettings, Settings } from "#/types/settings"; +import { retrieveAxiosErrorMessage } from "#/utils/retrieve-axios-error-message"; type SaveUserSettingsConfig = { onSuccess: MutateOptions>["onSuccess"]; @@ -41,7 +43,13 @@ export function SettingsProvider({ children }: SettingsProviderProps) { delete updatedSettings.LLM_API_KEY; } - await saveSettings(updatedSettings, { onSuccess: config?.onSuccess }); + await saveSettings(updatedSettings, { + onSuccess: config?.onSuccess, + onError: (error) => { + const errorMessage = retrieveAxiosErrorMessage(error); + toast.error(errorMessage); + }, + }); }; const value = React.useMemo( diff --git a/frontend/src/hooks/mutation/use-save-settings.ts b/frontend/src/hooks/mutation/use-save-settings.ts index 77a34fbe108d..0a12f6f1eb4e 100644 --- a/frontend/src/hooks/mutation/use-save-settings.ts +++ b/frontend/src/hooks/mutation/use-save-settings.ts @@ -2,8 +2,11 @@ import { useMutation, useQueryClient } from "@tanstack/react-query"; import { DEFAULT_SETTINGS } from "#/services/settings"; import OpenHands from "#/api/open-hands"; import { PostSettings, PostApiSettings } from "#/types/settings"; +import { MEMORY_CONDENSER } from "#/utils/feature-flags"; const saveSettingsMutationFn = async (settings: Partial) => { + const resetLlmApiKey = settings.LLM_API_KEY === ""; + const apiSettings: Partial = { llm_model: settings.LLM_MODEL, llm_base_url: settings.LLM_BASE_URL, @@ -11,11 +14,14 @@ const saveSettingsMutationFn = async (settings: Partial) => { language: settings.LANGUAGE || DEFAULT_SETTINGS.LANGUAGE, confirmation_mode: settings.CONFIRMATION_MODE, security_analyzer: settings.SECURITY_ANALYZER, - llm_api_key: settings.LLM_API_KEY?.trim() || undefined, + llm_api_key: resetLlmApiKey + ? 
"" + : settings.LLM_API_KEY?.trim() || undefined, remote_runtime_resource_factor: settings.REMOTE_RUNTIME_RESOURCE_FACTOR, github_token: settings.github_token, unset_github_token: settings.unset_github_token, - enable_default_condenser: settings.ENABLE_DEFAULT_CONDENSER, + enable_default_condenser: + MEMORY_CONDENSER || settings.ENABLE_DEFAULT_CONDENSER, user_consents_to_analytics: settings.user_consents_to_analytics, }; @@ -30,5 +36,8 @@ export const useSaveSettings = () => { onSuccess: async () => { await queryClient.invalidateQueries({ queryKey: ["settings"] }); }, + meta: { + disableToast: true, + }, }); }; diff --git a/frontend/src/hooks/query/use-settings.ts b/frontend/src/hooks/query/use-settings.ts index dcbca2200701..d5286b8dd729 100644 --- a/frontend/src/hooks/query/use-settings.ts +++ b/frontend/src/hooks/query/use-settings.ts @@ -4,6 +4,7 @@ import posthog from "posthog-js"; import OpenHands from "#/api/open-hands"; import { useAuth } from "#/context/auth-context"; import { useConfig } from "#/hooks/query/use-config"; +import { DEFAULT_SETTINGS } from "#/services/settings"; const getSettingsQueryFn = async () => { const apiSettings = await OpenHands.getSettings(); @@ -28,7 +29,7 @@ export const useSettings = () => { const { data: config } = useConfig(); const query = useQuery({ - queryKey: ["settings"], + queryKey: ["settings", githubTokenIsSet], queryFn: getSettingsQueryFn, enabled: config?.APP_MODE !== "saas" || githubTokenIsSet, // Only retry if the error is not a 404 because we @@ -50,5 +51,16 @@ export const useSettings = () => { setGitHubTokenIsSet(!!query.data?.GITHUB_TOKEN_IS_SET); }, [query.data?.GITHUB_TOKEN_IS_SET, query.isFetched]); + // We want to return the defaults if the settings aren't found so the user can still see the + // options to make their initial save. We don't set the defaults in `initialData` above because + // that would prepopulate the data to the cache and mess with expectations. 
Read more: + // https://tanstack.com/query/latest/docs/framework/react/guides/initial-query-data#using-initialdata-to-prepopulate-a-query + if (query.error?.status === 404) { + return { + ...query, + data: DEFAULT_SETTINGS, + }; + } + return query; }; diff --git a/frontend/src/hooks/use-app-logout.ts b/frontend/src/hooks/use-app-logout.ts new file mode 100644 index 000000000000..403e443eb06a --- /dev/null +++ b/frontend/src/hooks/use-app-logout.ts @@ -0,0 +1,16 @@ +import { useCurrentSettings } from "#/context/settings-context"; +import { useLogout } from "./mutation/use-logout"; +import { useConfig } from "./query/use-config"; + +export const useAppLogout = () => { + const { data: config } = useConfig(); + const { mutateAsync: logout } = useLogout(); + const { saveUserSettings } = useCurrentSettings(); + + const handleLogout = async () => { + if (config?.APP_MODE === "saas") await logout(); + else await saveUserSettings({ unset_github_token: true }); + }; + + return { handleLogout }; +}; diff --git a/frontend/src/icons/academy.svg b/frontend/src/icons/academy.svg new file mode 100644 index 000000000000..86320b3c6d16 --- /dev/null +++ b/frontend/src/icons/academy.svg @@ -0,0 +1,4 @@ + + + + diff --git a/frontend/src/icons/plus.svg b/frontend/src/icons/plus.svg new file mode 100644 index 000000000000..a3c0dffd5841 --- /dev/null +++ b/frontend/src/icons/plus.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/frontend/src/icons/profile.svg b/frontend/src/icons/profile.svg new file mode 100644 index 000000000000..a3ed9941d685 --- /dev/null +++ b/frontend/src/icons/profile.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/frontend/src/icons/settings.svg b/frontend/src/icons/settings.svg new file mode 100644 index 000000000000..ef366cac9e2e --- /dev/null +++ b/frontend/src/icons/settings.svg @@ -0,0 +1,4 @@ + + + + diff --git a/frontend/src/icons/success.svg b/frontend/src/icons/success.svg new file mode 100644 index 000000000000..ff27cfcfcf61 --- /dev/null +++ b/frontend/src/icons/success.svg @@ -0,0 +1,4 @@ + + + + diff --git a/frontend/src/icons/warning.svg b/frontend/src/icons/warning.svg new file mode 100644 index 000000000000..dc404bc1c7ac --- /dev/null +++ b/frontend/src/icons/warning.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/frontend/src/mocks/handlers.ts b/frontend/src/mocks/handlers.ts index 1a4aa1981088..7ccaeaf689ce 100644 --- a/frontend/src/mocks/handlers.ts +++ b/frontend/src/mocks/handlers.ts @@ -179,8 +179,10 @@ export const handlers = [ return HttpResponse.json(config); }), http.get("/api/settings", async () => { + await delay(); const settings: ApiSettings = { ...MOCK_USER_PREFERENCES.settings, + language: "no", }; // @ts-expect-error - mock types if (settings.github_token) settings.github_token_is_set = true; @@ -290,4 +292,6 @@ export const handlers = [ return HttpResponse.json(null, { status: 404 }); }), + + http.post("/api/logout", () => HttpResponse.json(null, { status: 200 })), ]; diff --git a/frontend/src/query-client-config.ts b/frontend/src/query-client-config.ts index fa9342ce4540..9b95523c0124 100644 --- a/frontend/src/query-client-config.ts +++ b/frontend/src/query-client-config.ts @@ -1,4 +1,8 @@ -import { QueryClientConfig, QueryCache } from "@tanstack/react-query"; +import { + QueryClientConfig, + QueryCache, + MutationCache, +} from "@tanstack/react-query"; import toast from "react-hot-toast"; import { retrieveAxiosErrorMessage } from "./utils/retrieve-axios-error-message"; @@ -20,16 +24,18 @@ export const queryClientConfig: QueryClientConfig = { } 
}, }), + mutationCache: new MutationCache({ + onError: (error, _, __, mutation) => { + if (!mutation?.meta?.disableToast) { + const message = retrieveAxiosErrorMessage(error); + toast.error(message); + } + }, + }), defaultOptions: { queries: { staleTime: 1000 * 60 * 5, // 5 minutes gcTime: 1000 * 60 * 15, // 15 minutes }, - mutations: { - onError: (error) => { - const message = retrieveAxiosErrorMessage(error); - toast.error(message); - }, - }, }, }; diff --git a/frontend/src/routes.ts b/frontend/src/routes.ts index f71299887783..b840b25402a7 100644 --- a/frontend/src/routes.ts +++ b/frontend/src/routes.ts @@ -8,6 +8,7 @@ import { export default [ layout("routes/_oh/route.tsx", [ index("routes/_oh._index/route.tsx"), + route("settings", "routes/settings.tsx"), route("conversations/:conversationId", "routes/_oh.app/route.tsx", [ index("routes/_oh.app._index/route.tsx"), route("browser", "routes/_oh.app.browser.tsx"), diff --git a/frontend/src/routes/settings.tsx b/frontend/src/routes/settings.tsx new file mode 100644 index 000000000000..15078c9e13a2 --- /dev/null +++ b/frontend/src/routes/settings.tsx @@ -0,0 +1,452 @@ +import React from "react"; +import toast from "react-hot-toast"; +import { Link } from "react-router"; +import { BrandButton } from "#/components/features/settings/brand-button"; +import { SettingsInput } from "#/components/features/settings/settings-input"; +import { SettingsSwitch } from "#/components/features/settings/settings-switch"; +import { HelpLink } from "#/components/features/settings/help-link"; +import { AvailableLanguages } from "#/i18n"; +import { hasAdvancedSettingsSet } from "#/utils/has-advanced-settings-set"; +import { DEFAULT_SETTINGS } from "#/services/settings"; +import { useSettings } from "#/hooks/query/use-settings"; +import { useConfig } from "#/hooks/query/use-config"; +import { useSaveSettings } from "#/hooks/mutation/use-save-settings"; +import { useAIConfigOptions } from "#/hooks/query/use-ai-config-options"; +import { ModelSelector } from "#/components/shared/modals/settings/model-selector"; +import { organizeModelsAndProviders } from "#/utils/organize-models-and-providers"; +import { useAppLogout } from "#/hooks/use-app-logout"; +import { handleCaptureConsent } from "#/utils/handle-capture-consent"; +import { ModalBackdrop } from "#/components/shared/modals/modal-backdrop"; +import { SettingsDropdownInput } from "#/components/features/settings/settings-dropdown-input"; +import { KeyStatusIcon } from "#/components/features/settings/key-status-icon"; +import SettingsIcon from "#/icons/settings.svg?react"; +import { retrieveAxiosErrorMessage } from "#/utils/retrieve-axios-error-message"; +import { LoadingSpinner } from "#/components/shared/loading-spinner"; +import { isCustomModel } from "#/utils/is-custom-model"; + +const REMOTE_RUNTIME_OPTIONS = [ + { key: 1, label: "1x (2 core, 8G)" }, + { key: 2, label: "2x (4 core, 16G)" }, +]; + +const displayErrorToast = (error: string) => { + toast.error(error, { + position: "top-right", + style: { + background: "#454545", + border: "1px solid #717888", + color: "#fff", + borderRadius: "4px", + }, + }); +}; + +const displaySuccessToast = (message: string) => { + toast.success(message, { + position: "top-right", + style: { + background: "#454545", + border: "1px solid #717888", + color: "#fff", + borderRadius: "4px", + }, + }); +}; + +function SettingsScreen() { + const { + data: settings, + isFetching: isFetchingSettings, + isFetched, + isSuccess: isSuccessfulSettings, + } = useSettings(); + const { data: 
config } = useConfig(); + const { + data: resources, + isFetching: isFetchingResources, + isSuccess: isSuccessfulResources, + } = useAIConfigOptions(); + const { mutate: saveSettings } = useSaveSettings(); + const { handleLogout } = useAppLogout(); + + const isFetching = isFetchingSettings || isFetchingResources; + const isSuccess = isSuccessfulSettings && isSuccessfulResources; + + const determineWhetherToToggleAdvancedSettings = () => { + if (isSuccess) { + return ( + isCustomModel(resources.models, settings.LLM_MODEL) || + hasAdvancedSettingsSet(settings) + ); + } + + return false; + }; + + const isSaas = config?.APP_MODE === "saas"; + const hasAppSlug = !!config?.APP_SLUG; + const isGitHubTokenSet = settings?.GITHUB_TOKEN_IS_SET; + const isLLMKeySet = settings?.LLM_API_KEY === "**********"; + const isAnalyticsEnabled = settings?.USER_CONSENTS_TO_ANALYTICS; + const isAdvancedSettingsSet = determineWhetherToToggleAdvancedSettings(); + + const modelsAndProviders = organizeModelsAndProviders( + resources?.models || [], + ); + + const [llmConfigMode, setLlmConfigMode] = React.useState< + "basic" | "advanced" + >(isAdvancedSettingsSet ? "advanced" : "basic"); + const [confirmationModeIsEnabled, setConfirmationModeIsEnabled] = + React.useState(!!settings?.SECURITY_ANALYZER); + const [resetSettingsModalIsOpen, setResetSettingsModalIsOpen] = + React.useState(false); + + const formAction = async (formData: FormData) => { + const languageLabel = formData.get("language-input")?.toString(); + const languageValue = AvailableLanguages.find( + ({ label }) => label === languageLabel, + )?.value; + + const llmProvider = formData.get("llm-provider-input")?.toString(); + const llmModel = formData.get("llm-model-input")?.toString(); + const fullLlmModel = `${llmProvider}/${llmModel}`.toLowerCase(); + const customLlmModel = formData.get("llm-custom-model-input")?.toString(); + + const rawRemoteRuntimeResourceFactor = formData + .get("runtime-settings-input") + ?.toString(); + const remoteRuntimeResourceFactor = REMOTE_RUNTIME_OPTIONS.find( + ({ label }) => label === rawRemoteRuntimeResourceFactor, + )?.key; + + const userConsentsToAnalytics = + formData.get("enable-analytics-switch")?.toString() === "on"; + + saveSettings( + { + github_token: + formData.get("github-token-input")?.toString() || undefined, + LANGUAGE: languageValue, + user_consents_to_analytics: userConsentsToAnalytics, + LLM_MODEL: customLlmModel || fullLlmModel, + LLM_BASE_URL: formData.get("base-url-input")?.toString() || "", + LLM_API_KEY: formData.get("llm-api-key-input")?.toString() || undefined, + AGENT: formData.get("agent-input")?.toString(), + SECURITY_ANALYZER: + formData.get("security-analyzer-input")?.toString() || "", + REMOTE_RUNTIME_RESOURCE_FACTOR: + remoteRuntimeResourceFactor || + DEFAULT_SETTINGS.REMOTE_RUNTIME_RESOURCE_FACTOR, + CONFIRMATION_MODE: confirmationModeIsEnabled, + }, + { + onSuccess: () => { + handleCaptureConsent(userConsentsToAnalytics); + displaySuccessToast("Settings saved"); + setLlmConfigMode(isAdvancedSettingsSet ? "advanced" : "basic"); + }, + onError: (error) => { + const errorMessage = retrieveAxiosErrorMessage(error); + displayErrorToast(errorMessage); + }, + }, + ); + }; + + const handleReset = () => { + saveSettings( + { + ...DEFAULT_SETTINGS, + LLM_API_KEY: "", // reset LLM API key + }, + { + onSuccess: () => { + displaySuccessToast("Settings reset"); + setResetSettingsModalIsOpen(false); + setLlmConfigMode(isAdvancedSettingsSet ? 
"advanced" : "basic"); + }, + }, + ); + }; + + React.useEffect(() => { + // If settings is still loading by the time the state is set, it will always + // default to basic settings. This is a workaround to ensure the correct + // settings are displayed. + setLlmConfigMode(isAdvancedSettingsSet ? "advanced" : "basic"); + }, [isAdvancedSettingsSet]); + + if (isFetched && !settings) { + return
Failed to fetch settings. Please try reloading.
; + } + + const onToggleAdvancedMode = (isToggled: boolean) => { + setLlmConfigMode(isToggled ? "advanced" : "basic"); + if (!isToggled) { + // reset advanced state + setConfirmationModeIsEnabled(!!settings?.SECURITY_ANALYZER); + } + }; + + return ( +
+
+
+ +

Settings

+
+ + {isFetching && ( +
+ +
+ )} + {!isFetching && settings && ( +
+
+
+

+ LLM Settings +

+ + Advanced + +
+ + {llmConfigMode === "basic" && ( + + )} + + {llmConfigMode === "advanced" && ( + + )} + {llmConfigMode === "advanced" && ( + + )} + + } + placeholder={isLLMKeySet ? "**********" : ""} + /> + + + + {llmConfigMode === "advanced" && ( + ({ + key: agent, + label: agent, + })) || [] + } + defaultSelectedKey={settings.AGENT} + isClearable={false} + /> + )} + + {isSaas && llmConfigMode === "advanced" && ( + + )} + + {llmConfigMode === "advanced" && ( + + Enable confirmation mode + + )} + {llmConfigMode === "advanced" && confirmationModeIsEnabled && ( +
+ ({ + key: analyzer, + label: analyzer, + })) || [] + } + defaultSelectedKey={settings.SECURITY_ANALYZER} + isClearable + showOptionalTag + /> +
+ )} +
+ +
+

+ GitHub Settings +

+ {isSaas && hasAppSlug && ( + + + Configure GitHub Repositories + + + )} + {!isSaas && ( + <> + } + /> + + + + )} + + + Disconnect from GitHub + +
+ +
+

+ Additional Settings +

+ + ({ + key: language.value, + label: language.label, + }))} + defaultSelectedKey={settings.LANGUAGE} + isClearable={false} + /> + + + Enable analytics + +
+
+ )} + +
+ setResetSettingsModalIsOpen(true)} + > + Reset to defaults + + + Save Changes + +
+
+ + {resetSettingsModalIsOpen && ( + +
+

Are you sure you want to reset all settings?

+
+ { + handleReset(); + }} + > + Reset + + + { + setResetSettingsModalIsOpen(false); + }} + > + Cancel + +
+
+
+ )} +
+ ); +} + +export default SettingsScreen; diff --git a/frontend/src/types/react-query.d.ts b/frontend/src/types/react-query.d.ts index 830a95a340b3..870623edb269 100644 --- a/frontend/src/types/react-query.d.ts +++ b/frontend/src/types/react-query.d.ts @@ -1,8 +1,15 @@ import "@tanstack/react-query"; import type { AxiosError } from "axios"; +interface MyMeta extends Record { + disableToast?: boolean; +} + declare module "@tanstack/react-query" { interface Register { defaultError: AxiosError; + + queryMeta: MyMeta; + mutationMeta: MyMeta; } } diff --git a/frontend/src/types/settings.ts b/frontend/src/types/settings.ts index 51da54b9ac6e..4723690e52f3 100644 --- a/frontend/src/types/settings.ts +++ b/frontend/src/types/settings.ts @@ -6,7 +6,7 @@ export type Settings = { LLM_API_KEY: string | null; CONFIRMATION_MODE: boolean; SECURITY_ANALYZER: string; - REMOTE_RUNTIME_RESOURCE_FACTOR: number; + REMOTE_RUNTIME_RESOURCE_FACTOR: number | null; GITHUB_TOKEN_IS_SET: boolean; ENABLE_DEFAULT_CONDENSER: boolean; USER_CONSENTS_TO_ANALYTICS: boolean | null; @@ -20,7 +20,7 @@ export type ApiSettings = { llm_api_key: string | null; confirmation_mode: boolean; security_analyzer: string; - remote_runtime_resource_factor: number; + remote_runtime_resource_factor: number | null; github_token_is_set: boolean; enable_default_condenser: boolean; user_consents_to_analytics: boolean | null; diff --git a/frontend/src/utils/has-advanced-settings-set.ts b/frontend/src/utils/has-advanced-settings-set.ts new file mode 100644 index 000000000000..374047c36a6f --- /dev/null +++ b/frontend/src/utils/has-advanced-settings-set.ts @@ -0,0 +1,10 @@ +import { DEFAULT_SETTINGS } from "#/services/settings"; +import { Settings } from "#/types/settings"; + +export const hasAdvancedSettingsSet = (settings: Settings): boolean => + !!settings.LLM_BASE_URL || + settings.AGENT !== DEFAULT_SETTINGS.AGENT || + settings.REMOTE_RUNTIME_RESOURCE_FACTOR !== + DEFAULT_SETTINGS.REMOTE_RUNTIME_RESOURCE_FACTOR || + settings.CONFIRMATION_MODE || + !!settings.SECURITY_ANALYZER; diff --git a/frontend/src/utils/is-custom-model.ts b/frontend/src/utils/is-custom-model.ts new file mode 100644 index 000000000000..b4d5418f3814 --- /dev/null +++ b/frontend/src/utils/is-custom-model.ts @@ -0,0 +1,22 @@ +import { extractModelAndProvider } from "./extract-model-and-provider"; +import { organizeModelsAndProviders } from "./organize-models-and-providers"; + +/** + * Check if a model is a custom model. A custom model is a model that is not part of the default models. 
+ * @param models Full list of models + * @param model Model to check + * @returns Whether the model is a custom model + */ +export const isCustomModel = (models: string[], model: string): boolean => { + if (!model) return false; + + const organizedModels = organizeModelsAndProviders(models); + const { provider: extractedProvider, model: extractedModel } = + extractModelAndProvider(model); + + const isKnownModel = + extractedProvider in organizedModels && + organizedModels[extractedProvider].models.includes(extractedModel); + + return !isKnownModel; +}; diff --git a/frontend/src/utils/settings-utils.ts b/frontend/src/utils/settings-utils.ts index f32835d2d804..b8533b3a4991 100644 --- a/frontend/src/utils/settings-utils.ts +++ b/frontend/src/utils/settings-utils.ts @@ -1,11 +1,11 @@ import { Settings } from "#/types/settings"; const extractBasicFormData = (formData: FormData) => { - const provider = formData.get("llm-provider")?.toString(); - const model = formData.get("llm-model")?.toString(); + const provider = formData.get("llm-provider-input")?.toString(); + const model = formData.get("llm-model-input")?.toString(); const LLM_MODEL = `${provider}/${model}`.toLowerCase(); - const LLM_API_KEY = formData.get("api-key")?.toString(); + const LLM_API_KEY = formData.get("llm-api-key-input")?.toString(); const AGENT = formData.get("agent")?.toString(); const LANGUAGE = formData.get("language")?.toString(); From 1ddfa99c573b3ae871338e93fce9c65cbf8953d0 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Fri, 14 Feb 2025 14:11:01 -0500 Subject: [PATCH 13/44] [Resolver]: Prep env in expectation of release (#6735) --- .github/workflows/openhands-resolver.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/openhands-resolver.yml b/.github/workflows/openhands-resolver.yml index dbfd678fc162..a69c320e5c2e 100644 --- a/.github/workflows/openhands-resolver.yml +++ b/.github/workflows/openhands-resolver.yml @@ -232,6 +232,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} @@ -268,6 +269,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN || github.token }} GITHUB_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} + GIT_USERNAME: ${{ secrets.PAT_USERNAME || 'openhands-agent' }} LLM_MODEL: ${{ secrets.LLM_MODEL || inputs.LLM_MODEL }} LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }} From 99b50d038edbba4506ac5828beb1350faee6442a Mon Sep 17 00:00:00 2001 From: "Ryan H. Tran" Date: Sat, 15 Feb 2025 02:21:40 +0700 Subject: [PATCH 14/44] chore: upgrade `openhands-aci` to 0.2.2 (#6731) --- poetry.lock | 28 ++++++++++++++++++++++------ pyproject.toml | 4 +++- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index cf0b20e6f7e3..b2c74ff59ffe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand.
[[package]] name = "aiohappyeyeballs" @@ -547,6 +547,21 @@ files = [ {file = "bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71"}, ] +[[package]] +name = "binaryornot" +version = "0.4.4" +description = "Ultra-lightweight pure Python package to check if a file is binary or text." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "binaryornot-0.4.4-py2.py3-none-any.whl", hash = "sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4"}, + {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, +] + +[package.dependencies] +chardet = ">=3.0.2" + [[package]] name = "bleach" version = "6.2.0" @@ -911,7 +926,7 @@ version = "5.2.0" description = "Universal encoding detector for Python 3" optional = false python-versions = ">=3.7" -groups = ["evaluation", "test"] +groups = ["main", "evaluation", "test"] files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, @@ -5862,17 +5877,18 @@ realtime = ["websockets (>=13,<15)"] [[package]] name = "openhands-aci" -version = "0.2.1" +version = "0.2.2" description = "An Agent-Computer Interface (ACI) designed for software development agents OpenHands." optional = false python-versions = "<4.0,>=3.12" groups = ["main"] files = [ - {file = "openhands_aci-0.2.1-py3-none-any.whl", hash = "sha256:10f5038e6303b8e1d40af0b61fb967f1d9d00c1ef05c2a06db2529364f3fef04"}, - {file = "openhands_aci-0.2.1.tar.gz", hash = "sha256:7dc72ba3aa7e9d699aacf8b85909ecaa6b87e7e4f203e4d549f740ac56d5ca2b"}, + {file = "openhands_aci-0.2.2-py3-none-any.whl", hash = "sha256:fdcea74d5760b7f936e532dec2923f06d6ba67b13312e2d91d230e751aa255f1"}, + {file = "openhands_aci-0.2.2.tar.gz", hash = "sha256:947d6c42d4d439200d0bda4748ee8bf5f0c517e8ee554d1c819b82f1d38536c6"}, ] [package.dependencies] +binaryornot = ">=0.4.4,<0.5.0" diskcache = ">=5.6.3,<6.0.0" flake8 = "*" gitpython = "*" @@ -10636,4 +10652,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "439164c45c674574af3fc15ade8e69452c11d4d45d95c2e671c752cfed6a3143" +content-hash = "e34523a44c733c59e3e2e580a4f17d45c8cd6e593a773707c77f21d0f848b185" diff --git a/pyproject.toml b/pyproject.toml index 755f75981dbe..6e3dfe61a3b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,7 +67,7 @@ runloop-api-client = "0.22.0" libtmux = ">=0.37,<0.40" pygithub = "^2.5.0" joblib = "*" -openhands-aci = "^0.2.1" +openhands-aci = "^0.2.2" python-socketio = "^5.11.4" redis = "^5.2.0" sse-starlette = "^2.1.3" @@ -105,6 +105,7 @@ reportlab = "*" [tool.coverage.run] concurrency = ["gevent"] + [tool.poetry.group.runtime.dependencies] jupyterlab = "*" notebook = "*" @@ -133,6 +134,7 @@ ignore = ["D1"] [tool.ruff.lint.pydocstyle] convention = "google" + [tool.poetry.group.evaluation.dependencies] streamlit = "*" whatthepatch = "*" From b07fddcb718c1de52b02360d3412005504c86eb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 15 Feb 2025 01:17:17 +0000 Subject: [PATCH 15/44] chore(deps): bump the version-all group across 1 directory with 12 updates (#6736) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> 
Co-authored-by: Engel Nyst --- poetry.lock | 207 +++++++++++++++++++++++++------------------------ pyproject.toml | 4 +- 2 files changed, 107 insertions(+), 104 deletions(-) diff --git a/poetry.lock b/poetry.lock index b2c74ff59ffe..5100a72f3949 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -595,18 +595,18 @@ files = [ [[package]] name = "boto3" -version = "1.36.16" +version = "1.36.20" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "boto3-1.36.16-py3-none-any.whl", hash = "sha256:b10583bf8bd35be1b4027ee7e26b7cdf2078c79eab18357fd602cecb6d39400b"}, - {file = "boto3-1.36.16.tar.gz", hash = "sha256:0cf92ca0538ab115447e1c58050d43e1273e88c58ddfea2b6f133fdc508b400a"}, + {file = "boto3-1.36.20-py3-none-any.whl", hash = "sha256:e132e31232ee107f1c187f566d96863a907433e5bdd8d8928effddd30a96242f"}, + {file = "boto3-1.36.20.tar.gz", hash = "sha256:4a27ffc0543c2a429600542047f00c6a1e95270139d36d8cc636e9cc9a78b835"}, ] [package.dependencies] -botocore = ">=1.36.16,<1.37.0" +botocore = ">=1.36.20,<1.37.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.11.0,<0.12.0" @@ -615,14 +615,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.36.16" +version = "1.36.20" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "botocore-1.36.16-py3-none-any.whl", hash = "sha256:aca0348ccd730332082489b6817fdf89e1526049adcf6e9c8c11c96dd9f42c03"}, - {file = "botocore-1.36.16.tar.gz", hash = "sha256:10c6aa386ba1a9a0faef6bb5dbfc58fc2563a3c6b95352e86a583cd5f14b11f3"}, + {file = "botocore-1.36.20-py3-none-any.whl", hash = "sha256:0110bf2208e4569659d0ccfca94baa4999501334397987b02712a94493cbf48b"}, + {file = "botocore-1.36.20.tar.gz", hash = "sha256:3815a05518ff03a8dbc8d5a3c29b95889409a25ac87a282067f6e26fefb7c40a"}, ] [package.dependencies] @@ -1385,6 +1385,7 @@ files = [ {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb"}, {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b"}, {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543"}, + {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:60eb32934076fa07e4316b7b2742fa52cbb190b42c2df2863dbc4230a0a9b385"}, {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e"}, {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e"}, {file = "cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053"}, @@ -1395,6 +1396,7 @@ files = [ {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289"}, {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = 
"sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7"}, {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c"}, + {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:9abcc2e083cbe8dde89124a47e5e53ec38751f0d7dfd36801008f316a127d7ba"}, {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64"}, {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285"}, {file = "cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417"}, @@ -1702,14 +1704,14 @@ files = [ [[package]] name = "e2b" -version = "1.0.6" +version = "1.1.0" description = "E2B SDK that give agents cloud environments" optional = false -python-versions = "<4.0,>=3.8" +python-versions = "<4.0,>=3.9" groups = ["main"] files = [ - {file = "e2b-1.0.6-py3-none-any.whl", hash = "sha256:4ae6e00d46e6b0b9ab05388c408f9155488ee9f022c5a6fd47939f492ccf3b58"}, - {file = "e2b-1.0.6.tar.gz", hash = "sha256:e35d47f5581565060a5c18e4cb839cf61de310d275fa0a6589d8fc8bf65957a7"}, + {file = "e2b-1.1.0-py3-none-any.whl", hash = "sha256:5d99c675e155cf124f457d77f91c4cb32b286d241ca6cd37ac8d6c0711fc272e"}, + {file = "e2b-1.1.0.tar.gz", hash = "sha256:bd054fbaa9baed48919500ba853bdb72c750b04e0bac8365bde75cdfbdf80d18"}, ] [package.dependencies] @@ -2394,14 +2396,14 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-api-python-client" -version = "2.160.0" +version = "2.161.0" description = "Google API Client Library for Python" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "google_api_python_client-2.160.0-py2.py3-none-any.whl", hash = "sha256:63d61fb3e4cf3fb31a70a87f45567c22f6dfe87bbfa27252317e3e2c42900db4"}, - {file = "google_api_python_client-2.160.0.tar.gz", hash = "sha256:a8ccafaecfa42d15d5b5c3134ced8de08380019717fc9fb1ed510ca58eca3b7e"}, + {file = "google_api_python_client-2.161.0-py2.py3-none-any.whl", hash = "sha256:9476a5a4f200bae368140453df40f9cda36be53fa7d0e9a9aac4cdb859a26448"}, + {file = "google_api_python_client-2.161.0.tar.gz", hash = "sha256:324c0cce73e9ea0a0d2afd5937e01b7c2d6a4d7e2579cdb6c384f9699d6c9f37"}, ] [package.dependencies] @@ -2473,14 +2475,14 @@ tool = ["click (>=6.0.0)"] [[package]] name = "google-cloud-aiplatform" -version = "1.79.0" +version = "1.80.0" description = "Vertex AI API client library" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "google_cloud_aiplatform-1.79.0-py2.py3-none-any.whl", hash = "sha256:e52d518c386ce2b4ce57f1b73b46c57531d9a6ccd70c21a37b349f428bfc1c3f"}, - {file = "google_cloud_aiplatform-1.79.0.tar.gz", hash = "sha256:362bfd16716dcfb6c131736f25246790002b29c99a246fcf4c08a7c71bd2301f"}, + {file = "google_cloud_aiplatform-1.80.0-py2.py3-none-any.whl", hash = "sha256:45d2a170f22431dae977551eccb740400bdb899807d0c8d4c16c53b2c1dbc6a5"}, + {file = "google_cloud_aiplatform-1.80.0.tar.gz", hash = "sha256:bcaa4570a6fb56d3d29cb6b8f92588d4d1a1931de5f90cf07761853dab4c76fd"}, ] [package.dependencies] @@ -3582,14 +3584,14 @@ files = [ [[package]] name = "json-repair" -version = "0.35.0" +version = "0.36.1" description = "A package to repair broken json strings" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = 
"json_repair-0.35.0-py3-none-any.whl", hash = "sha256:1d429407158474d28a996e745b8f8f7dc78957cb2cfbc92120b9f580b5230a9e"}, - {file = "json_repair-0.35.0.tar.gz", hash = "sha256:e70f834865a4ae5fe64352c23c1c16d3b70c5dd62dc544a169d8b0932bdbdcaa"}, + {file = "json_repair-0.36.1-py3-none-any.whl", hash = "sha256:ed7ca0c4cf813cc9a75843297507dd8bb21394fef58dc9fde81f542aaaa43457"}, + {file = "json_repair-0.36.1.tar.gz", hash = "sha256:f01688157d610b0b1f22d86bf54d45c7fc0729965c66726c1ec1ad64f98d6572"}, ] [[package]] @@ -4121,14 +4123,14 @@ types-tqdm = "*" [[package]] name = "litellm" -version = "1.60.8" +version = "1.61.3" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" groups = ["main"] files = [ - {file = "litellm-1.60.8-py3-none-any.whl", hash = "sha256:260bdcc9749c769f1a84dc927abe7c91f6294a97da05abc6b513c5dd2dcf17a1"}, - {file = "litellm-1.60.8.tar.gz", hash = "sha256:4a0aca9bd226d727ca4a41aaf8722f825fc10cf33f37a177a3cceb4ee2c442d8"}, + {file = "litellm-1.61.3-py3-none-any.whl", hash = "sha256:c01145759260eeb7f624aba82e333804181da4c03fa5b980361490cdaf5b1e65"}, + {file = "litellm-1.61.3.tar.gz", hash = "sha256:626ca5731cf640097e30b0b665ec282de2d2bd8b95cf6a41255819576201de03"}, ] [package.dependencies] @@ -4167,20 +4169,20 @@ pydantic = ">=1.10" [[package]] name = "llama-index" -version = "0.12.16" +version = "0.12.17" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.9" groups = ["llama-index"] files = [ - {file = "llama_index-0.12.16-py3-none-any.whl", hash = "sha256:c94d0cf6735219d97d91e2eca5bcfac89ec1583990917f934b075d5a45686cf6"}, - {file = "llama_index-0.12.16.tar.gz", hash = "sha256:4fd5f5b94eb3f8dd470bb8cc0e1b985d931e8f31473266ef69855488fd8ae3f2"}, + {file = "llama_index-0.12.17-py3-none-any.whl", hash = "sha256:d8938e5e6e5ff78b6865f7890a01d1a40818a5df798555ee6eb7f2c5ab65aeb0"}, + {file = "llama_index-0.12.17.tar.gz", hash = "sha256:761a2dad3eb74bd5242ecf8fd28337c0c8745fc8d39d2f9f9b18bf733ad679f4"}, ] [package.dependencies] llama-index-agent-openai = ">=0.4.0,<0.5.0" llama-index-cli = ">=0.4.0,<0.5.0" -llama-index-core = ">=0.12.16,<0.13.0" +llama-index-core = ">=0.12.17,<0.13.0" llama-index-embeddings-openai = ">=0.3.0,<0.4.0" llama-index-indices-managed-llama-cloud = ">=0.4.0" llama-index-llms-openai = ">=0.3.0,<0.4.0" @@ -4227,14 +4229,14 @@ llama-index-llms-openai = ">=0.3.0,<0.4.0" [[package]] name = "llama-index-core" -version = "0.12.16.post1" +version = "0.12.17" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.9" groups = ["llama-index"] files = [ - {file = "llama_index_core-0.12.16.post1-py3-none-any.whl", hash = "sha256:95904a44f25e122a45963541c56a50c4daf2ffaf062d1a3224c84a6dc9e6801f"}, - {file = "llama_index_core-0.12.16.post1.tar.gz", hash = "sha256:8fed0554ae71b6c1f80b53164723af28c887951eef7aa1b44ba6c8103c0efb2c"}, + {file = "llama_index_core-0.12.17-py3-none-any.whl", hash = "sha256:867ec650a1f9eba9f6d65005045a68bc13bae8d65763e32029d9610360c03979"}, + {file = "llama_index_core-0.12.17.tar.gz", hash = "sha256:2e8fb457983978af19db1ceba71d440f6891279525c5e7eb2ec73a6b727be113"}, ] [package.dependencies] @@ -5029,13 +5031,14 @@ type = ["mypy (==1.11.2)"] [[package]] name = "modal" -version = "0.73.26" +version = "0.73.49" description = "Python client library for Modal" optional = false python-versions = ">=3.9" groups = ["main", "evaluation"] files = [ 
- {file = "modal-0.73.26-py3-none-any.whl", hash = "sha256:8b3d3708ee43270723057ddacf13aa28028abfccc0c87e01b707a41bd7440ef2"}, + {file = "modal-0.73.49-py3-none-any.whl", hash = "sha256:81e885c8b7246d447d1adb914f80b8ac41a82e7004bb4044624d490701912e1e"}, + {file = "modal-0.73.49.tar.gz", hash = "sha256:4a9cd8eb47ad8226b7f508a60c6c1f3f238c39e9a4207844ff98fbbd9f64f296"}, ] [package.dependencies] @@ -5051,7 +5054,7 @@ toml = "*" typer = ">=0.9" types-certifi = "*" types-toml = "*" -typing-extensions = ">=4.6,<5.0" +typing_extensions = ">=4.6,<5.0" watchfiles = "*" [[package]] @@ -5535,67 +5538,67 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync" [[package]] name = "numpy" -version = "2.2.2" +version = "2.2.3" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" groups = ["main", "evaluation", "llama-index", "test"] files = [ - {file = "numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e"}, - {file = "numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e"}, - {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715"}, - {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a"}, - {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97"}, - {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957"}, - {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d"}, - {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd"}, - {file = "numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160"}, - {file = "numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014"}, - {file = "numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189"}, - {file = "numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323"}, - {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac"}, - {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e"}, - {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c"}, - {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f"}, - {file = "numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826"}, - {file = "numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8"}, - {file = "numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50"}, - {file = "numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2"}, - {file = "numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467"}, - {file = "numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a"}, - {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825"}, - {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37"}, - {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748"}, - {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0"}, - {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278"}, - {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba"}, - {file = "numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283"}, - {file = "numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb"}, - {file = "numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc"}, - {file = "numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369"}, - {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd"}, - {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be"}, - {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84"}, - {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff"}, - {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0"}, - {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de"}, - {file = "numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9"}, - {file = "numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369"}, - {file = "numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391"}, - {file = "numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39"}, - 
{file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317"}, - {file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49"}, - {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2"}, - {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7"}, - {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb"}, - {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648"}, - {file = "numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4"}, - {file = "numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576"}, - {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495"}, - {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df"}, - {file = "numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a"}, - {file = "numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60"}, - {file = "numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cbc6472e01952d3d1b2772b720428f8b90e2deea8344e854df22b0618e9cce71"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdfe0c22692a30cd830c0755746473ae66c4a8f2e7bd508b35fb3b6a0813d787"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:e37242f5324ffd9f7ba5acf96d774f9276aa62a966c0bad8dae692deebec7716"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95172a21038c9b423e68be78fd0be6e1b97674cde269b76fe269a5dfa6fadf0b"}, + {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5b47c440210c5d1d67e1cf434124e0b5c395eee1f5806fdd89b553ed1acd0a3"}, + {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0391ea3622f5c51a2e29708877d56e3d276827ac5447d7f45e9bc4ade8923c52"}, + {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f6b3dfc7661f8842babd8ea07e9897fe3d9b69a1d7e5fbb743e4160f9387833b"}, + {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ad78ce7f18ce4e7df1b2ea4019b5817a2f6a8a16e34ff2775f646adce0a5027"}, + {file = "numpy-2.2.3-cp310-cp310-win32.whl", hash = "sha256:5ebeb7ef54a7be11044c33a17b2624abe4307a75893c001a4800857956b41094"}, + {file = "numpy-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:596140185c7fa113563c67c2e894eabe0daea18cf8e33851738c19f70ce86aeb"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:16372619ee728ed67a2a606a614f56d3eabc5b86f8b615c79d01957062826ca8"}, + {file = 
"numpy-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5521a06a3148686d9269c53b09f7d399a5725c47bbb5b35747e1cb76326b714b"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:7c8dde0ca2f77828815fd1aedfdf52e59071a5bae30dac3b4da2a335c672149a"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:77974aba6c1bc26e3c205c2214f0d5b4305bdc719268b93e768ddb17e3fdd636"}, + {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d42f9c36d06440e34226e8bd65ff065ca0963aeecada587b937011efa02cdc9d"}, + {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2712c5179f40af9ddc8f6727f2bd910ea0eb50206daea75f58ddd9fa3f715bb"}, + {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c8b0451d2ec95010d1db8ca733afc41f659f425b7f608af569711097fd6014e2"}, + {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9b4a8148c57ecac25a16b0e11798cbe88edf5237b0df99973687dd866f05e1b"}, + {file = "numpy-2.2.3-cp311-cp311-win32.whl", hash = "sha256:1f45315b2dc58d8a3e7754fe4e38b6fce132dab284a92851e41b2b344f6441c5"}, + {file = "numpy-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f48ba6f6c13e5e49f3d3efb1b51c8193215c42ac82610a04624906a9270be6f"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532"}, + {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e"}, + {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe"}, + {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021"}, + {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8"}, + {file = "numpy-2.2.3-cp312-cp312-win32.whl", hash = "sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe"}, + {file = "numpy-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5"}, + {file = "numpy-2.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2"}, + {file = 
"numpy-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1"}, + {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304"}, + {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d"}, + {file = "numpy-2.2.3-cp313-cp313-win32.whl", hash = "sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693"}, + {file = "numpy-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0"}, + {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610"}, + {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76"}, + {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a"}, + {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf"}, + {file = "numpy-2.2.3-cp313-cp313t-win32.whl", hash = "sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef"}, + {file = "numpy-2.2.3-cp313-cp313t-win_amd64.whl", hash = "sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3c2ec8a0f51d60f1e9c0c5ab116b7fc104b165ada3f6c58abf881cb2eb16044d"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:ed2cf9ed4e8ebc3b754d398cba12f24359f018b416c380f577bbae112ca52fc9"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39261798d208c3095ae4f7bc8eaeb3481ea8c6e03dc48028057d3cbdbdb8937e"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:783145835458e60fa97afac25d511d00a1eca94d4a8f3ace9fe2043003c678e4"}, + {file = "numpy-2.2.3.tar.gz", hash = "sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020"}, ] [[package]] @@ -5851,14 +5854,14 @@ sympy = "*" [[package]] name = "openai" -version = "1.61.1" +version = "1.63.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" groups = ["main", "evaluation", "llama-index", "test"] files = [ - {file = "openai-1.61.1-py3-none-any.whl", hash = "sha256:72b0826240ce26026ac2cd17951691f046e5be82ad122d20a8e1b30ca18bd11e"}, - {file = "openai-1.61.1.tar.gz", hash = "sha256:ce1851507218209961f89f3520e06726c0aa7d0512386f0f977e3ac3e4f2472e"}, + {file = "openai-1.63.0-py3-none-any.whl", hash = 
"sha256:a664dfc78f0a05ca46c3e21f344f840cf6bf7174f13cfa9de214ed28bfca1dda"}, + {file = "openai-1.63.0.tar.gz", hash = "sha256:597d7a1b35b113e5a09fcb953bdb1eef44f404a39985f3d7573b3ab09221fd66"}, ] [package.dependencies] @@ -7997,14 +8000,14 @@ files = [ [[package]] name = "reportlab" -version = "4.3.0" +version = "4.3.1" description = "The Reportlab Toolkit" optional = false python-versions = "<4,>=3.7" groups = ["test"] files = [ - {file = "reportlab-4.3.0-py3-none-any.whl", hash = "sha256:81e7bb207132c430cdb9d9f41cfdd1e0fbd1b0eb26a0f7def55d39c1680ad345"}, - {file = "reportlab-4.3.0.tar.gz", hash = "sha256:a90754589bea1c921a745aa981677d2d144f50c690800cda29aafae67c1a8d93"}, + {file = "reportlab-4.3.1-py3-none-any.whl", hash = "sha256:0f37dd16652db3ef84363cf744632a28c38bd480d5bf94683466852d7bb678dd"}, + {file = "reportlab-4.3.1.tar.gz", hash = "sha256:230f78b21667194d8490ac9d12958d5c14686352db7fbe03b95140fafdf5aa97"}, ] [package.dependencies] @@ -8012,9 +8015,9 @@ chardet = "*" pillow = ">=9.0.0" [package.extras] -accel = ["rl-accel (>=0.9.0,<1.1)"] +accel = ["rl_accel (>=0.9.0,<1.1)"] pycairo = ["freetype-py (>=2.3.0,<2.4)", "rlPyCairo (>=0.2.0,<1)"] -renderpm = ["rl-renderPM (>=4.0.3,<4.1)"] +renderpm = ["rl_renderPM (>=4.0.3,<4.1)"] [[package]] name = "requests" @@ -8278,14 +8281,14 @@ files = [ [[package]] name = "runloop-api-client" -version = "0.22.0" +version = "0.23.0" description = "The official Python library for the runloop API" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "runloop_api_client-0.22.0-py3-none-any.whl", hash = "sha256:54cb3e94970e7734a97fb5dc1adc40e9beb3f2280c6de830154bf3eaa46217de"}, - {file = "runloop_api_client-0.22.0.tar.gz", hash = "sha256:6ec1f3aff4cf3984ea95e6d1498b5bc9bfe7b153cd2acb94d82fc86da2965cd5"}, + {file = "runloop_api_client-0.23.0-py3-none-any.whl", hash = "sha256:ee42c46385a986648a6c7bdf49833ec9010a1ffdf1a58c4957940f150606e3ac"}, + {file = "runloop_api_client-0.23.0.tar.gz", hash = "sha256:93b2915d78c3258eba0924a2f1db246b586fa92bb318148ffd5d45fcb60adb3e"}, ] [package.dependencies] @@ -10652,4 +10655,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "e34523a44c733c59e3e2e580a4f17d45c8cd6e593a773707c77f21d0f848b185" +content-hash = "431b15e98a730d03d7b3b8ea9ea15d812cf50802b35c18c741a69518c1a00464" diff --git a/pyproject.toml b/pyproject.toml index 6e3dfe61a3b5..c4d209769f8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ numpy = "*" json-repair = "*" browsergym-core = "0.10.2" # integrate browsergym-core as the browsing interface html2text = "*" -e2b = ">=1.0.5,<1.1.0" +e2b = ">=1.0.5,<1.2.0" pexpect = "*" jinja2 = "^3.1.3" python-multipart = "*" @@ -63,7 +63,7 @@ protobuf = "^4.21.6,<5.0.0" # chromadb currently fails on 5.0+ opentelemetry-api = "1.25.0" opentelemetry-exporter-otlp-proto-grpc = "1.25.0" modal = ">=0.66.26,<0.74.0" -runloop-api-client = "0.22.0" +runloop-api-client = "0.23.0" libtmux = ">=0.37,<0.40" pygithub = "^2.5.0" joblib = "*" From 63565982aa1f94a5cd1e468a705777848634357c Mon Sep 17 00:00:00 2001 From: Cheng Yang <93481273+young010101@users.noreply.github.com> Date: Sat, 15 Feb 2025 09:51:59 +0800 Subject: [PATCH 16/44] docs: improve docstrings for CLI and config utils (#5398) Co-authored-by: Engel Nyst --- docs/modules/usage/architecture/runtime.md | 6 +-- openhands/README.md | 3 ++ openhands/core/cli.py | 2 +- openhands/core/config/utils.py | 18 +++++---- openhands/core/main.py | 21 
++++++++++ openhands/core/message.py | 4 +- openhands/llm/llm.py | 4 +- .../runtime/impl/docker/docker_runtime.py | 13 +++--- .../plugins/agent_skills/file_ops/file_ops.py | 40 +++++++++++++------ openhands/runtime/utils/runtime_build.py | 4 +- openhands/server/routes/public.py | 3 +- tests/unit/test_micro_agents.py | 2 +- 12 files changed, 82 insertions(+), 38 deletions(-) diff --git a/docs/modules/usage/architecture/runtime.md b/docs/modules/usage/architecture/runtime.md index b08a1ed99bbf..f682f249acb3 100644 --- a/docs/modules/usage/architecture/runtime.md +++ b/docs/modules/usage/architecture/runtime.md @@ -54,14 +54,13 @@ graph TD 6. Action Execution: The runtime client receives actions from the backend, executes them in the sandboxed environment, and sends back observations 7. Observation Return: The action execution server sends execution results back to the OpenHands backend as observations - The role of the client: + - It acts as an intermediary between the OpenHands backend and the sandboxed environment - It executes various types of actions (shell commands, file operations, Python code, etc.) safely within the container - It manages the state of the sandboxed environment, including the current working directory and loaded plugins - It formats and returns observations to the backend, ensuring a consistent interface for processing results - ## How OpenHands builds and maintains OH Runtime images OpenHands' approach to building and managing runtime images ensures efficiency, consistency, and flexibility in creating and maintaining Docker images for both production and development environments. @@ -78,16 +77,15 @@ Tags may be in one of 2 formats: - **Source Tag**: `oh_v{openhands_version}_{16_digit_lock_hash}_{16_digit_source_hash}` (e.g.: `oh_v0.9.9_1234567890abcdef_1234567890abcdef`) - #### Source Tag - Most Specific This is the first 16 digits of the MD5 of the directory hash for the source directory. This gives a hash for only the openhands source - #### Lock Tag This hash is built from the first 16 digits of the MD5 of: + - The name of the base image upon which the image was built (e.g.: `nikolaik/python-nodejs:python3.12-nodejs22`) - The content of the `pyproject.toml` included in the image. - The content of the `poetry.lock` included in the image. diff --git a/openhands/README.md b/openhands/README.md index 4c6a67f09787..f43f9ae3795b 100644 --- a/openhands/README.md +++ b/openhands/README.md @@ -6,6 +6,7 @@ This diagram provides an overview of the roles of each component and how they co ![OpenHands System Architecture Diagram (July 4, 2024)](../docs/static/img/system_architecture_overview.png) ## Classes + The key classes in OpenHands are: * LLM: brokers all interactions with large language models. Works with any underlying completion model, thanks to LiteLLM. @@ -23,7 +24,9 @@ The key classes in OpenHands are: * ConversationManager: keeps a list of active sessions, and ensures requests are routed to the correct Session ## Control Flow + Here's the basic loop (in pseudocode) that drives agents. 
+ ```python while True: prompt = agent.generate_prompt(state) diff --git a/openhands/core/cli.py b/openhands/core/cli.py index f1f687fed52a..1e31537155ac 100644 --- a/openhands/core/cli.py +++ b/openhands/core/cli.py @@ -99,7 +99,7 @@ def read_input(config: AppConfig) -> str: async def main(loop: asyncio.AbstractEventLoop): - """Runs the agent in CLI mode""" + """Runs the agent in CLI mode.""" args = parse_arguments() diff --git a/openhands/core/config/utils.py b/openhands/core/config/utils.py index e0b1ee71adc3..f057eb6ad2fe 100644 --- a/openhands/core/config/utils.py +++ b/openhands/core/config/utils.py @@ -29,9 +29,14 @@ load_dotenv() -def load_from_env(cfg: AppConfig, env_or_toml_dict: dict | MutableMapping[str, str]): - """Reads the env-style vars and sets config attributes based on env vars or a config.toml dict. - Compatibility with vars like LLM_BASE_URL, AGENT_MEMORY_ENABLED, SANDBOX_TIMEOUT and others. +def load_from_env( + cfg: AppConfig, env_or_toml_dict: dict | MutableMapping[str, str] +) -> None: + """Sets config attributes from environment variables or TOML dictionary. + + Reads environment-style variables and updates the config attributes accordingly. + Supports configuration of LLM settings (e.g., LLM_BASE_URL), agent settings + (e.g., AGENT_MEMORY_ENABLED), sandbox settings (e.g., SANDBOX_TIMEOUT), and more. Args: cfg: The AppConfig object to set attributes on. @@ -44,7 +49,7 @@ def get_optional_type(union_type: UnionType) -> Any: return next((t for t in types if t is not type(None)), None) # helper function to set attributes based on env vars - def set_attr_from_env(sub_config: BaseModel, prefix=''): + def set_attr_from_env(sub_config: BaseModel, prefix='') -> None: """Set attributes of a config model based on environment variables.""" for field_name, field_info in sub_config.model_fields.items(): field_value = getattr(sub_config, field_name) @@ -95,7 +100,7 @@ def set_attr_from_env(sub_config: BaseModel, prefix=''): set_attr_from_env(default_agent_config, 'AGENT_') -def load_from_toml(cfg: AppConfig, toml_file: str = 'config.toml'): +def load_from_toml(cfg: AppConfig, toml_file: str = 'config.toml') -> None: """Load the config from the toml file. Supports both styles of config vars. Args: @@ -103,8 +108,7 @@ def load_from_toml(cfg: AppConfig, toml_file: str = 'config.toml'): toml_file: The path to the toml file. Defaults to 'config.toml'. See Also: - - `config.template.toml` for the full list of config options. - - `SandboxConfig` for the sandbox-specific config options. + - config.template.toml for the full list of config options. """ # try to read the config.toml file into the config object try: diff --git a/openhands/core/main.py b/openhands/core/main.py index 474757d9c70c..2652931cce7a 100644 --- a/openhands/core/main.py +++ b/openhands/core/main.py @@ -78,6 +78,7 @@ async def run_controller( headless_mode: bool = True, ) -> State | None: """Main coroutine to run the agent controller with task input flexibility. + It's only used when you launch openhands backend directly via cmdline. Args: @@ -91,6 +92,26 @@ async def run_controller( fake_user_response_fn: An optional function that receives the current state (could be None) and returns a fake user response. headless_mode: Whether the agent is run in headless mode. + + Returns: + The final state of the agent, or None if an error occurred. + + Raises: + AssertionError: If initial_user_action is not an Action instance. + Exception: Various exceptions may be raised during execution and will be logged. 
+ + Notes: + - State persistence: If config.file_store is set, the agent's state will be + saved between sessions. + - Trajectories: If config.trajectories_path is set, execution history will be + saved as JSON for analysis. + - Budget control: Execution is limited by config.max_iterations and + config.max_budget_per_task. + + Example: + >>> config = load_app_config() + >>> action = MessageAction(content="Write a hello world program") + >>> state = await run_controller(config=config, initial_user_action=action) """ sid = sid or generate_sid(config) diff --git a/openhands/core/message.py b/openhands/core/message.py index ea4f0106abea..b508142242fd 100644 --- a/openhands/core/message.py +++ b/openhands/core/message.py @@ -122,8 +122,8 @@ def _list_serializer(self) -> dict: def _add_tool_call_keys(self, message_dict: dict) -> dict: """Add tool call keys if we have a tool call or response. - NOTE: this is necessary for both native and non-native tool calling.""" - + NOTE: this is necessary for both native and non-native tool calling + """ # an assistant message calling a tool if self.tool_calls is not None: message_dict['tool_calls'] = [ diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py index e8dd2f6f1ef8..b5fe67943467 100644 --- a/openhands/llm/llm.py +++ b/openhands/llm/llm.py @@ -605,7 +605,9 @@ def _is_local(self) -> bool: return False def _completion_cost(self, response) -> float: - """Calculate the cost of a completion response based on the model. Local models are treated as free. + """Calculate completion cost and update metrics with running total. + + Calculate the cost of a completion response based on the model. Local models are treated as free. Add the current cost into total cost in metrics. Args: diff --git a/openhands/runtime/impl/docker/docker_runtime.py b/openhands/runtime/impl/docker/docker_runtime.py index 4312f3b6e6e4..b2c5e980226e 100644 --- a/openhands/runtime/impl/docker/docker_runtime.py +++ b/openhands/runtime/impl/docker/docker_runtime.py @@ -39,6 +39,7 @@ class DockerRuntime(ActionExecutionClient): """This runtime will subscribe the event stream. + When receive an event, it will send the event to runtime-client which run inside the docker environment. Args: @@ -405,11 +406,11 @@ def pause(self): """Pause the runtime by stopping the container. This is different from container.stop() as it ensures environment variables are properly preserved.""" if not self.container: - raise RuntimeError("Container not initialized") - + raise RuntimeError('Container not initialized') + # First, ensure all environment variables are properly persisted in .bashrc # This is already handled by add_env_vars in base.py - + # Stop the container self.container.stop() self.log('debug', f'Container {self.container_name} paused') @@ -418,12 +419,12 @@ def resume(self): """Resume the runtime by starting the container. 
This is different from container.start() as it ensures environment variables are properly restored.""" if not self.container: - raise RuntimeError("Container not initialized") - + raise RuntimeError('Container not initialized') + # Start the container self.container.start() self.log('debug', f'Container {self.container_name} resumed') - + # Wait for the container to be ready self._wait_until_alive() diff --git a/openhands/runtime/plugins/agent_skills/file_ops/file_ops.py b/openhands/runtime/plugins/agent_skills/file_ops/file_ops.py index b2e1b4c8aa4c..47451c2985c1 100644 --- a/openhands/runtime/plugins/agent_skills/file_ops/file_ops.py +++ b/openhands/runtime/plugins/agent_skills/file_ops/file_ops.py @@ -1,6 +1,8 @@ -"""file_ops.py +"""File operations module for OpenHands agent. -This module provides various file manipulation skills for the OpenHands agent. +This module provides a collection of file manipulation skills that enable the OpenHands +agent to perform various file operations such as opening, searching, and navigating +through files and directories. Functions: - open_file(path: str, line_number: int | None = 1, context_lines: int = 100): Opens a file and optionally moves to a specific line. @@ -10,6 +12,9 @@ - search_dir(search_term: str, dir_path: str = './'): Searches for a term in all files in the specified directory. - search_file(search_term: str, file_path: str | None = None): Searches for a term in the specified file or the currently open file. - find_file(file_name: str, dir_path: str = './'): Finds all files with the given name in the specified directory. + +Note: + All functions return string representations of their results. """ import os @@ -81,11 +86,18 @@ def _clamp(value, min_value, max_value): def _lint_file(file_path: str) -> tuple[str | None, int | None]: - """Lint the file at the given path and return a tuple with a boolean indicating if there are errors, + """Perform linting on a file and identify the first error location. + + Lint the file at the given path and return a tuple with a boolean indicating if there are errors, and the line number of the first error, if any. + Args: + file_path: str: The path to the file to lint. + Returns: - tuple[str | None, int | None]: (lint_error, first_error_line_number) + A tuple containing: + - The lint error message if found, None otherwise + - The line number of the first error, None if no errors """ linter = DefaultLinter() lint_error: list[LintResult] = linter.lint(file_path) @@ -165,14 +177,18 @@ def _cur_file_header(current_file, total_lines) -> str: def open_file( path: str, line_number: int | None = 1, context_lines: int | None = WINDOW ) -> None: - """Opens the file at the given path in the editor. IF the file is to be edited, first use `scroll_down` repeatedly to read the full file! - If line_number is provided, the window will be moved to include that line. - It only shows the first 100 lines by default! `context_lines` is the max number of lines to be displayed, up to 100. Use `scroll_up` and `scroll_down` to view more content up or down. + """Opens a file in the editor and optionally positions at a specific line. + + The function displays a limited window of content, centered around the specified line + number if provided. To view the complete file content, the agent should use scroll_down and scroll_up + commands iteratively. Args: - path: str: The path to the file to open, preferred absolute path. - line_number: int | None = 1: The line number to move to. Defaults to 1. 
- context_lines: int | None = 100: Only shows this number of lines in the context window (usually from line 1), with line_number as the center (if possible). Defaults to 100. + path: The path to the file to open. Absolute path is recommended. + line_number: The target line number to center the view on (if possible). + Defaults to 1. + context_lines: Maximum number of lines to display in the view window. + Limited to 100 lines. Defaults to 100. """ global CURRENT_FILE, CURRENT_LINE, WINDOW @@ -316,8 +332,8 @@ def search_file(search_term: str, file_path: str | None = None) -> None: """Searches for search_term in file. If file is not provided, searches in the current open file. Args: - search_term: str: The term to search for. - file_path: str | None: The path to the file to search. + search_term: The term to search for. + file_path: The path to the file to search. """ global CURRENT_FILE if file_path is None: diff --git a/openhands/runtime/utils/runtime_build.py b/openhands/runtime/utils/runtime_build.py index bbb83ac7f9df..862ce04d7a58 100644 --- a/openhands/runtime/utils/runtime_build.py +++ b/openhands/runtime/utils/runtime_build.py @@ -69,7 +69,6 @@ def get_runtime_image_repo_and_tag(base_image: str) -> tuple[str, str]: Returns: - tuple[str, str]: The Docker repo and tag of the Docker image """ - if get_runtime_image_repo() in base_image: logger.debug( f'The provided image [{base_image}] is already a valid runtime image.\n' @@ -115,6 +114,7 @@ def build_runtime_image( extra_build_args: List[str] | None = None, ) -> str: """Prepares the final docker build folder. + If dry_run is False, it will also build the OpenHands runtime Docker image using the docker build folder. Parameters: @@ -349,7 +349,7 @@ def _build_sandbox_image( platform: str | None = None, extra_build_args: List[str] | None = None, ): - """Build and tag the sandbox image. The image will be tagged with all tags that do not yet exist""" + """Build and tag the sandbox image. The image will be tagged with all tags that do not yet exist.""" names = [ f'{runtime_image_repo}:{source_tag}', f'{runtime_image_repo}:{lock_tag}', diff --git a/openhands/server/routes/public.py b/openhands/server/routes/public.py index a5c861a62e59..59e5c4e4efe6 100644 --- a/openhands/server/routes/public.py +++ b/openhands/server/routes/public.py @@ -23,8 +23,7 @@ @app.get('/models') async def get_litellm_models() -> list[str]: - """ - Get all models supported by LiteLLM. + """Get all models supported by LiteLLM. This function combines models from litellm and Bedrock, removing any error-prone Bedrock models. 
diff --git a/tests/unit/test_micro_agents.py b/tests/unit/test_micro_agents.py index c7461bbda226..7f78df16b183 100644 --- a/tests/unit/test_micro_agents.py +++ b/tests/unit/test_micro_agents.py @@ -53,7 +53,7 @@ def test_all_agents_are_loaded(): def test_coder_agent_with_summary(event_stream: EventStream, agent_configs: dict): - """Coder agent should render code summary as part of prompt""" + """Coder agent should render code summary as part of prompt.""" mock_llm = MagicMock() content = json.dumps({'action': 'finish', 'args': {}}) mock_llm.completion.return_value = {'choices': [{'message': {'content': content}}]} From efbff2e655f9eb6dac399da44ef203c0e92b42ef Mon Sep 17 00:00:00 2001 From: Boxuan Li Date: Fri, 14 Feb 2025 21:01:42 -0800 Subject: [PATCH 17/44] Add a sanity test for load_app_config and get_agent_config_arg (#6723) --- tests/unit/test_config.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 7aab02c0e019..10f09447ba6c 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -11,6 +11,7 @@ finalize_config, get_agent_config_arg, get_llm_config_arg, + load_app_config, load_from_env, load_from_toml, ) @@ -811,3 +812,29 @@ def test_get_agent_config_arg(temp_toml_file): assert not agent_config2.memory_enabled assert agent_config2.enable_prompt_extensions assert agent_config2.memory_max_threads == 10 + + +def test_agent_config_custom_group_name(temp_toml_file): + temp_toml = """ +[core] +max_iterations = 99 + +[agent.group1] +memory_enabled = true + +[agent.group2] +memory_enabled = false +""" + with open(temp_toml_file, 'w') as f: + f.write(temp_toml) + + # just a sanity check that load app config wouldn't fail + app_config = load_app_config(config_file=temp_toml_file) + assert app_config.max_iterations == 99 + + # run_infer in evaluation can use `get_agent_config_arg` to load custom + # agent configs with any group name (not just agent name) + agent_config1 = get_agent_config_arg('group1', temp_toml_file) + assert agent_config1.memory_enabled + agent_config2 = get_agent_config_arg('group2', temp_toml_file) + assert not agent_config2.memory_enabled From 4443417c753054d460c2ecc25d1c2c97031155a3 Mon Sep 17 00:00:00 2001 From: Boxuan Li Date: Fri, 14 Feb 2025 21:01:57 -0800 Subject: [PATCH 18/44] A few fixes for TAC evaluation harness (#6586) --- .../benchmarks/the_agent_company/browsing.py | 4 +++- .../benchmarks/the_agent_company/run_infer.py | 14 ++++++++++++-- .../the_agent_company/scripts/run_infer.sh | 4 ++-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/evaluation/benchmarks/the_agent_company/browsing.py b/evaluation/benchmarks/the_agent_company/browsing.py index 5ce97129777a..e8747c2dede9 100644 --- a/evaluation/benchmarks/the_agent_company/browsing.py +++ b/evaluation/benchmarks/the_agent_company/browsing.py @@ -267,7 +267,9 @@ def pre_login( obs: BrowserOutputObservation = runtime.run_action(browser_action) logger.debug(obs, extra={'msg_type': 'OBSERVATION'}) if save_screenshots: - image_data = base64.b64decode(obs.screenshot) + image_data = base64.b64decode( + obs.screenshot.replace('data:image/png;base64,', '') + ) with open(os.path.join(directory, f'{image_id}.png'), 'wb') as file: file.write(image_data) image_id += 1 diff --git a/evaluation/benchmarks/the_agent_company/run_infer.py b/evaluation/benchmarks/the_agent_company/run_infer.py index cbfbb386fdde..84fb057ec791 100644 --- a/evaluation/benchmarks/the_agent_company/run_infer.py +++ 
b/evaluation/benchmarks/the_agent_company/run_infer.py @@ -36,7 +36,7 @@ def get_config( task_short_name: str, mount_path_on_host: str, llm_config: LLMConfig, - agent_config: AgentConfig, + agent_config: AgentConfig | None, ) -> AppConfig: config = AppConfig( run_as_openhands=False, @@ -159,11 +159,21 @@ def run_solver( os.makedirs(screenshots_dir, exist_ok=True) for image_id, obs in enumerate(state.history): if isinstance(obs, BrowserOutputObservation): - image_data = base64.b64decode(obs.screenshot) + image_data = base64.b64decode( + obs.screenshot.replace('data:image/png;base64,', '') + ) with open( os.path.join(screenshots_dir, f'{image_id}.png'), 'wb' ) as file: file.write(image_data) + if obs.set_of_marks: + som_image_data = base64.b64decode( + obs.set_of_marks.replace('data:image/png;base64,', '') + ) + with open( + os.path.join(screenshots_dir, f'{image_id}_som.png'), 'wb' + ) as file: + file.write(som_image_data) if save_final_state: os.makedirs(state_dir, exist_ok=True) diff --git a/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh index 3366c9826005..e266e5990b1a 100755 --- a/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh +++ b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh @@ -129,8 +129,6 @@ temp_file="tasks_${START_PERCENTILE}_${END_PERCENTILE}.md" sed -n "${start_line},${end_line}p" tasks.md > "$temp_file" while IFS= read -r task_image; do - docker pull $task_image - # Remove prefix using ## to remove longest matching pattern from start task_name=${task_image##ghcr.io/theagentcompany/} @@ -144,6 +142,8 @@ while IFS= read -r task_image; do continue fi + docker pull $task_image + # Build the Python command COMMAND="poetry run python run_infer.py \ --agent-llm-config \"$AGENT_LLM_CONFIG\" \ From 30e39e85d081bd750ad5ca9b238d87d2f48a89d8 Mon Sep 17 00:00:00 2001 From: Christopher Pereira Date: Sat, 15 Feb 2025 02:58:16 -0300 Subject: [PATCH 19/44] Show docker build errors (#6695) --- openhands/core/logger.py | 3 +++ openhands/runtime/builder/docker.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/openhands/core/logger.py b/openhands/core/logger.py index 701a5bb0c39a..b384fedac1d8 100644 --- a/openhands/core/logger.py +++ b/openhands/core/logger.py @@ -156,11 +156,13 @@ class RollingLogger: max_lines: int char_limit: int log_lines: list[str] + all_lines: str def __init__(self, max_lines=10, char_limit=80): self.max_lines = max_lines self.char_limit = char_limit self.log_lines = [''] * self.max_lines + self.all_lines = '' def is_enabled(self): return DEBUG and sys.stdout.isatty() @@ -175,6 +177,7 @@ def add_line(self, line): self.log_lines.pop(0) self.log_lines.append(line[: self.char_limit]) self.print_lines() + self.all_lines += line + '\n' def write_immediately(self, line): self._write(line) diff --git a/openhands/runtime/builder/docker.py b/openhands/runtime/builder/docker.py index de99bd440f3e..dbbea66daed0 100644 --- a/openhands/runtime/builder/docker.py +++ b/openhands/runtime/builder/docker.py @@ -168,8 +168,10 @@ def build( ) except subprocess.CalledProcessError as e: - logger.error(f'Image build failed:\n{e}') + logger.error(f'Image build failed:\n{e}') # TODO: {e} is empty logger.error(f'Command output:\n{e.output}') + if self.rolling_logger.is_enabled(): + logger.error("Docker build output:\n" + self.rolling_logger.all_lines) # Show the error raise except subprocess.TimeoutExpired: From b018567d53285c979af1d07bca4830c9e2b5bf3b Mon Sep 17 00:00:00 
2001 From: Arpan Koirala Date: Mon, 17 Feb 2025 17:03:27 +0545 Subject: [PATCH 20/44] fix: no interaction when clearing poetry cache (#6752) --- openhands/runtime/utils/runtime_templates/Dockerfile.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openhands/runtime/utils/runtime_templates/Dockerfile.j2 b/openhands/runtime/utils/runtime_templates/Dockerfile.j2 index ef073a6a7a84..c0d915f75ec9 100644 --- a/openhands/runtime/utils/runtime_templates/Dockerfile.j2 +++ b/openhands/runtime/utils/runtime_templates/Dockerfile.j2 @@ -88,7 +88,7 @@ RUN \ # Set environment variables echo "OH_INTERPRETER_PATH=$(/openhands/micromamba/bin/micromamba run -n openhands poetry run python -c "import sys; print(sys.executable)")" >> /etc/environment && \ # Clear caches - /openhands/micromamba/bin/micromamba run -n openhands poetry cache clear --all . && \ + /openhands/micromamba/bin/micromamba run -n openhands poetry cache clear --all . -n && \ # Set permissions chmod -R g+rws /openhands/poetry && \ mkdir -p /openhands/workspace && chmod -R g+rws,o+rw /openhands/workspace && \ From 745038b394d78b112223c406913f1d0b4d390843 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 11:35:50 +0000 Subject: [PATCH 21/44] chore(deps): bump the version-all group in /frontend with 4 updates (#6725) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: amanape <83104063+amanape@users.noreply.github.com> --- frontend/package-lock.json | 378 ++++++++++++++++++------------------- frontend/package.json | 6 +- 2 files changed, 188 insertions(+), 196 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 0115a8c6934b..a261adf741e7 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -23,12 +23,12 @@ "eslint-config-airbnb-typescript": "^18.0.0", "framer-motion": "^12.4.2", "i18next": "^24.2.2", - "i18next-browser-languagedetector": "^8.0.2", + "i18next-browser-languagedetector": "^8.0.3", "i18next-http-backend": "^3.0.2", "isbot": "^5.1.22", "jose": "^5.9.4", "monaco-editor": "^0.52.2", - "posthog-js": "^1.217.2", + "posthog-js": "^1.217.6", "react": "^19.0.0", "react-dom": "^19.0.0", "react-highlight": "^0.15.0", @@ -58,7 +58,7 @@ "@testing-library/jest-dom": "^6.6.1", "@testing-library/react": "^16.2.0", "@testing-library/user-event": "^14.6.1", - "@types/node": "^22.13.2", + "@types/node": "^22.13.4", "@types/react": "^19.0.8", "@types/react-dom": "^19.0.3", "@types/react-highlight": "^0.12.8", @@ -171,22 +171,21 @@ } }, "node_modules/@babel/core": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.8.tgz", - "integrity": "sha512-l+lkXCHS6tQEc5oUpK28xBOZ6+HwaH7YwoYQbLFiYb4nS2/l1tKnZEtEWkD0GuiYdvArf9qBS0XlQGXzPMsNqQ==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.9.tgz", + "integrity": "sha512-lWBYIrF7qK5+GjY5Uy+/hEgp8OJWOD/rpy74GplYRhEauvbHDeFB8t5hPOZxCZ0Oxf4Cc36tK51/l3ymJysrKw==", "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.8", + "@babel/generator": "^7.26.9", "@babel/helper-compilation-targets": "^7.26.5", "@babel/helper-module-transforms": "^7.26.0", - "@babel/helpers": "^7.26.7", - "@babel/parser": "^7.26.8", - "@babel/template": "^7.26.8", - "@babel/traverse": "^7.26.8", - "@babel/types": "^7.26.8", - "@types/gensync": "^1.0.0", + 
"@babel/helpers": "^7.26.9", + "@babel/parser": "^7.26.9", + "@babel/template": "^7.26.9", + "@babel/traverse": "^7.26.9", + "@babel/types": "^7.26.9", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -211,13 +210,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.8.tgz", - "integrity": "sha512-ef383X5++iZHWAXX0SXQR6ZyQhw/0KtTkrTz61WXRhFM6dhpHulO/RJz79L8S6ugZHJkOOkUrUdxgdF2YiPFnA==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.9.tgz", + "integrity": "sha512-kEWdzjOAUMW4hAyrzJ0ZaTOu9OmpyDIQicIh0zg0EEcEkYXZb2TjtBhnHi2ViX7PKwZqF4xwqfAm299/QMP3lg==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.8", - "@babel/types": "^7.26.8", + "@babel/parser": "^7.26.9", + "@babel/types": "^7.26.9", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^3.0.2" @@ -265,18 +264,18 @@ } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz", - "integrity": "sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.26.9.tgz", + "integrity": "sha512-ubbUqCofvxPRurw5L8WTsCLSkQiVpov4Qx0WMA+jUN+nXBK8ADPlJO1grkFw5CWKC5+sZSOfuGMdX1aI1iT9Sg==", "dev": true, "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.25.9", "@babel/helper-member-expression-to-functions": "^7.25.9", "@babel/helper-optimise-call-expression": "^7.25.9", - "@babel/helper-replace-supers": "^7.25.9", + "@babel/helper-replace-supers": "^7.26.5", "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", - "@babel/traverse": "^7.25.9", + "@babel/traverse": "^7.26.9", "semver": "^6.3.1" }, "engines": { @@ -422,25 +421,25 @@ } }, "node_modules/@babel/helpers": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.7.tgz", - "integrity": "sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.9.tgz", + "integrity": "sha512-Mz/4+y8udxBKdmzt/UjPACs4G3j5SshJJEFFKxlCGPydG4JAHXxjWjAwjd09tf6oINvl1VfMJo+nB7H2YKQ0dA==", "license": "MIT", "dependencies": { - "@babel/template": "^7.25.9", - "@babel/types": "^7.26.7" + "@babel/template": "^7.26.9", + "@babel/types": "^7.26.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.8.tgz", - "integrity": "sha512-TZIQ25pkSoaKEYYaHbbxkfL36GNsQ6iFiBbeuzAkLnXayKR1yP1zFe+NxuZWWsUyvt8icPU9CCq0sgWGXR1GEw==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.9.tgz", + "integrity": "sha512-81NWa1njQblgZbQHxWHpxxCzNsa3ZwvFqpUg7P+NNUU6f3UU2jBEg4OlF/J6rl8+PQGh1q6/zWScd001YwcA5A==", "license": "MIT", "dependencies": { - "@babel/types": "^7.26.8" + "@babel/types": "^7.26.9" }, "bin": { "parser": "bin/babel-parser.js" @@ -585,9 +584,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.7.tgz", - "integrity": 
"sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.9.tgz", + "integrity": "sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -597,30 +596,30 @@ } }, "node_modules/@babel/template": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.8.tgz", - "integrity": "sha512-iNKaX3ZebKIsCvJ+0jd6embf+Aulaa3vNBqZ41kM7iTWjx5qzWKXGHiJUW3+nTpQ18SG11hdF8OAzKrpXkb96Q==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.9.tgz", + "integrity": "sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==", "license": "MIT", "dependencies": { "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.26.8", - "@babel/types": "^7.26.8" + "@babel/parser": "^7.26.9", + "@babel/types": "^7.26.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.8.tgz", - "integrity": "sha512-nic9tRkjYH0oB2dzr/JoGIm+4Q6SuYeLEiIiZDwBscRMYFJ+tMAz98fuel9ZnbXViA2I0HVSSRRK8DW5fjXStA==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.9.tgz", + "integrity": "sha512-ZYW7L+pL8ahU5fXmNbPF+iZFHCv5scFak7MZ9bwaRPLUhHh7QQEMjZUg0HevihoqCM5iSYHN61EyCoZvqC+bxg==", "license": "MIT", "dependencies": { "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.8", - "@babel/parser": "^7.26.8", - "@babel/template": "^7.26.8", - "@babel/types": "^7.26.8", + "@babel/generator": "^7.26.9", + "@babel/parser": "^7.26.9", + "@babel/template": "^7.26.9", + "@babel/types": "^7.26.9", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -629,9 +628,9 @@ } }, "node_modules/@babel/types": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.8.tgz", - "integrity": "sha512-eUuWapzEGWFEpHFxgEaBG8e3n6S8L3MSu0oda755rOfabWPnh0Our1AozNFVUxGFIhbKgd1ksprsoDGMinTOTA==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.9.tgz", + "integrity": "sha512-Y3IR1cRnOxOCDvMmNiym7XpXQ93iGDDPHx+Zj+NM+rg0fBaShfQLkg+hKPaZCEvg5N/LeCo4+Rj/i3FuJsIQaw==", "license": "MIT", "dependencies": { "@babel/helper-string-parser": "^7.25.9", @@ -3220,13 +3219,13 @@ "license": "BSD-3-Clause" }, "node_modules/@inquirer/confirm": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.5.tgz", - "integrity": "sha512-ZB2Cz8KeMINUvoeDi7IrvghaVkYT2RB0Zb31EaLWOE87u276w4wnApv0SH2qWaJ3r0VSUa3BIuz7qAV2ZvsZlg==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.6.tgz", + "integrity": "sha512-6ZXYK3M1XmaVBZX6FCfChgtponnL0R6I7k8Nu+kaoNkT828FVZTcca1MqmWQipaW2oNREQl5AaPCUOOCVNdRMw==", "dev": true, "license": "MIT", "dependencies": { - "@inquirer/core": "^10.1.6", + "@inquirer/core": "^10.1.7", "@inquirer/type": "^3.0.4" }, "engines": { @@ -3242,9 +3241,9 @@ } }, "node_modules/@inquirer/core": { - "version": "10.1.6", - "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.6.tgz", - "integrity": "sha512-Bwh/Zk6URrHwZnSSzAZAKH7YgGYi0xICIBDFOqBQoXNNAzBHw/bgXgLmChfp+GyR3PnChcTbiCTZGC6YJNJkMA==", + "version": "10.1.7", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.7.tgz", + 
"integrity": "sha512-AA9CQhlrt6ZgiSy6qoAigiA1izOa751ugX6ioSjqgJ+/Gd+tEN/TORk5sUYNjXuHWfW0r1n/a6ak4u/NqHHrtA==", "dev": true, "license": "MIT", "dependencies": { @@ -5613,9 +5612,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.6.tgz", - "integrity": "sha512-+GcCXtOQoWuC7hhX1P00LqjjIiS/iOouHXhMdiDSnq/1DGTox4SpUvO52Xm+div6+106r+TcvOeo/cxvyEyTgg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.8.tgz", + "integrity": "sha512-q217OSE8DTp8AFHuNHXo0Y86e1wtlfVrXiAlwkIvGRQv9zbc6mE3sjIVfwI8sYUyNxwOg0j/Vm1RKM04JcWLJw==", "cpu": [ "arm" ], @@ -5626,9 +5625,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.34.6.tgz", - "integrity": "sha512-E8+2qCIjciYUnCa1AiVF1BkRgqIGW9KzJeesQqVfyRITGQN+dFuoivO0hnro1DjT74wXLRZ7QF8MIbz+luGaJA==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.34.8.tgz", + "integrity": "sha512-Gigjz7mNWaOL9wCggvoK3jEIUUbGul656opstjaUSGC3eT0BM7PofdAJaBfPFWWkXNVAXbaQtC99OCg4sJv70Q==", "cpu": [ "arm64" ], @@ -5639,9 +5638,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.34.6.tgz", - "integrity": "sha512-z9Ib+OzqN3DZEjX7PDQMHEhtF+t6Mi2z/ueChQPLS/qUMKY7Ybn5A2ggFoKRNRh1q1T03YTQfBTQCJZiepESAg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.34.8.tgz", + "integrity": "sha512-02rVdZ5tgdUNRxIUrFdcMBZQoaPMrxtwSb+/hOfBdqkatYHR3lZ2A2EGyHq2sGOd0Owk80oV3snlDASC24He3Q==", "cpu": [ "arm64" ], @@ -5652,9 +5651,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.34.6.tgz", - "integrity": "sha512-PShKVY4u0FDAR7jskyFIYVyHEPCPnIQY8s5OcXkdU8mz3Y7eXDJPdyM/ZWjkYdR2m0izD9HHWA8sGcXn+Qrsyg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.34.8.tgz", + "integrity": "sha512-qIP/elwR/tq/dYRx3lgwK31jkZvMiD6qUtOycLhTzCvrjbZ3LjQnEM9rNhSGpbLXVJYQ3rq39A6Re0h9tU2ynw==", "cpu": [ "x64" ], @@ -5665,9 +5664,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.34.6.tgz", - "integrity": "sha512-YSwyOqlDAdKqs0iKuqvRHLN4SrD2TiswfoLfvYXseKbL47ht1grQpq46MSiQAx6rQEN8o8URtpXARCpqabqxGQ==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.34.8.tgz", + "integrity": "sha512-IQNVXL9iY6NniYbTaOKdrlVP3XIqazBgJOVkddzJlqnCpRi/yAeSOa8PLcECFSQochzqApIOE1GHNu3pCz+BDA==", "cpu": [ "arm64" ], @@ -5678,9 +5677,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.34.6.tgz", - "integrity": "sha512-HEP4CgPAY1RxXwwL5sPFv6BBM3tVeLnshF03HMhJYCNc6kvSqBgTMmsEjb72RkZBAWIqiPUyF1JpEBv5XT9wKQ==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.34.8.tgz", + "integrity": 
"sha512-TYXcHghgnCqYFiE3FT5QwXtOZqDj5GmaFNTNt3jNC+vh22dc/ukG2cG+pi75QO4kACohZzidsq7yKTKwq/Jq7Q==", "cpu": [ "x64" ], @@ -5691,9 +5690,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.34.6.tgz", - "integrity": "sha512-88fSzjC5xeH9S2Vg3rPgXJULkHcLYMkh8faix8DX4h4TIAL65ekwuQMA/g2CXq8W+NJC43V6fUpYZNjaX3+IIg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.34.8.tgz", + "integrity": "sha512-A4iphFGNkWRd+5m3VIGuqHnG3MVnqKe7Al57u9mwgbyZ2/xF9Jio72MaY7xxh+Y87VAHmGQr73qoKL9HPbXj1g==", "cpu": [ "arm" ], @@ -5704,9 +5703,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.34.6.tgz", - "integrity": "sha512-wM4ztnutBqYFyvNeR7Av+reWI/enK9tDOTKNF+6Kk2Q96k9bwhDDOlnCUNRPvromlVXo04riSliMBs/Z7RteEg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.34.8.tgz", + "integrity": "sha512-S0lqKLfTm5u+QTxlFiAnb2J/2dgQqRy/XvziPtDd1rKZFXHTyYLoVL58M/XFwDI01AQCDIevGLbQrMAtdyanpA==", "cpu": [ "arm" ], @@ -5717,9 +5716,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.34.6.tgz", - "integrity": "sha512-9RyprECbRa9zEjXLtvvshhw4CMrRa3K+0wcp3KME0zmBe1ILmvcVHnypZ/aIDXpRyfhSYSuN4EPdCCj5Du8FIA==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.34.8.tgz", + "integrity": "sha512-jpz9YOuPiSkL4G4pqKrus0pn9aYwpImGkosRKwNi+sJSkz+WU3anZe6hi73StLOQdfXYXC7hUfsQlTnjMd3s1A==", "cpu": [ "arm64" ], @@ -5730,9 +5729,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.34.6.tgz", - "integrity": "sha512-qTmklhCTyaJSB05S+iSovfo++EwnIEZxHkzv5dep4qoszUMX5Ca4WM4zAVUMbfdviLgCSQOu5oU8YoGk1s6M9Q==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.34.8.tgz", + "integrity": "sha512-KdSfaROOUJXgTVxJNAZ3KwkRc5nggDk+06P6lgi1HLv1hskgvxHUKZ4xtwHkVYJ1Rep4GNo+uEfycCRRxht7+Q==", "cpu": [ "arm64" ], @@ -5743,9 +5742,9 @@ ] }, "node_modules/@rollup/rollup-linux-loongarch64-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.34.6.tgz", - "integrity": "sha512-4Qmkaps9yqmpjY5pvpkfOerYgKNUGzQpFxV6rnS7c/JfYbDSU0y6WpbbredB5cCpLFGJEqYX40WUmxMkwhWCjw==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.34.8.tgz", + "integrity": "sha512-NyF4gcxwkMFRjgXBM6g2lkT58OWztZvw5KkV2K0qqSnUEqCVcqdh2jN4gQrTn/YUpAcNKyFHfoOZEer9nwo6uQ==", "cpu": [ "loong64" ], @@ -5756,9 +5755,9 @@ ] }, "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.34.6.tgz", - "integrity": "sha512-Zsrtux3PuaxuBTX/zHdLaFmcofWGzaWW1scwLU3ZbW/X+hSsFbz9wDIp6XvnT7pzYRl9MezWqEqKy7ssmDEnuQ==", + "version": "4.34.8", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.34.8.tgz", + "integrity": "sha512-LMJc999GkhGvktHU85zNTDImZVUCJ1z/MbAJTnviiWmmjyckP5aQsHtcujMjpNdMZPT2rQEDBlJfubhs3jsMfw==", "cpu": [ "ppc64" ], @@ -5769,9 +5768,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.34.6.tgz", - "integrity": "sha512-aK+Zp+CRM55iPrlyKiU3/zyhgzWBxLVrw2mwiQSYJRobCURb781+XstzvA8Gkjg/hbdQFuDw44aUOxVQFycrAg==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.34.8.tgz", + "integrity": "sha512-xAQCAHPj8nJq1PI3z8CIZzXuXCstquz7cIOL73HHdXiRcKk8Ywwqtx2wrIy23EcTn4aZ2fLJNBB8d0tQENPCmw==", "cpu": [ "riscv64" ], @@ -5782,9 +5781,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.34.6.tgz", - "integrity": "sha512-WoKLVrY9ogmaYPXwTH326+ErlCIgMmsoRSx6bO+l68YgJnlOXhygDYSZe/qbUJCSiCiZAQ+tKm88NcWuUXqOzw==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.34.8.tgz", + "integrity": "sha512-DdePVk1NDEuc3fOe3dPPTb+rjMtuFw89gw6gVWxQFAuEqqSdDKnrwzZHrUYdac7A7dXl9Q2Vflxpme15gUWQFA==", "cpu": [ "s390x" ], @@ -5795,9 +5794,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.34.6.tgz", - "integrity": "sha512-Sht4aFvmA4ToHd2vFzwMFaQCiYm2lDFho5rPcvPBT5pCdC+GwHG6CMch4GQfmWTQ1SwRKS0dhDYb54khSrjDWw==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.34.8.tgz", + "integrity": "sha512-8y7ED8gjxITUltTUEJLQdgpbPh1sUQ0kMTmufRF/Ns5tI9TNMNlhWtmPKKHCU0SilX+3MJkZ0zERYYGIVBYHIA==", "cpu": [ "x64" ], @@ -5808,9 +5807,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.34.6.tgz", - "integrity": "sha512-zmmpOQh8vXc2QITsnCiODCDGXFC8LMi64+/oPpPx5qz3pqv0s6x46ps4xoycfUiVZps5PFn1gksZzo4RGTKT+A==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.34.8.tgz", + "integrity": "sha512-SCXcP0ZpGFIe7Ge+McxY5zKxiEI5ra+GT3QRxL0pMMtxPfpyLAKleZODi1zdRHkz5/BhueUrYtYVgubqe9JBNQ==", "cpu": [ "x64" ], @@ -5821,9 +5820,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.34.6.tgz", - "integrity": "sha512-3/q1qUsO/tLqGBaD4uXsB6coVGB3usxw3qyeVb59aArCgedSF66MPdgRStUd7vbZOsko/CgVaY5fo2vkvPLWiA==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.34.8.tgz", + "integrity": "sha512-YHYsgzZgFJzTRbth4h7Or0m5O74Yda+hLin0irAIobkLQFRQd1qWmnoVfwmKm9TXIZVAD0nZ+GEb2ICicLyCnQ==", "cpu": [ "arm64" ], @@ -5834,9 +5833,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.34.6.tgz", - "integrity": "sha512-oLHxuyywc6efdKVTxvc0135zPrRdtYVjtVD5GUm55I3ODxhU/PwkQFD97z16Xzxa1Fz0AEe4W/2hzRtd+IfpOA==", + "version": "4.34.8", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.34.8.tgz", + "integrity": "sha512-r3NRQrXkHr4uWy5TOjTpTYojR9XmF0j/RYgKCef+Ag46FWUTltm5ziticv8LdNsDMehjJ543x/+TJAek/xBA2w==", "cpu": [ "ia32" ], @@ -5847,9 +5846,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.34.6", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.34.6.tgz", - "integrity": "sha512-0PVwmgzZ8+TZ9oGBmdZoQVXflbvuwzN/HRclujpl4N/q3i+y0lqLw8n1bXA8ru3sApDjlmONaNAuYr38y1Kr9w==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.34.8.tgz", + "integrity": "sha512-U0FaE5O1BCpZSeE6gBl3c5ObhePQSfk9vDRToMmTkbhCOgW4jqvtS5LGyQ76L1fH8sM0keRp4uDTsbjiUyjk0g==", "cpu": [ "x64" ], @@ -6140,9 +6139,9 @@ } }, "node_modules/@tanstack/query-core": { - "version": "5.66.0", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.66.0.tgz", - "integrity": "sha512-J+JeBtthiKxrpzUu7rfIPDzhscXF2p5zE/hVdrqkACBP8Yu0M96mwJ5m/8cPPYQE9aRNvXztXHlNwIh4FEeMZw==", + "version": "5.66.3", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.66.3.tgz", + "integrity": "sha512-+2iDxH7UFdtwcry766aJszGmbByQDIzTltJ3oQAZF9bhCxHCIN3yDwHa6qDCZxcpMGvUphCRx/RYJvLbM8mucQ==", "license": "MIT", "funding": { "type": "github", @@ -6150,12 +6149,12 @@ } }, "node_modules/@tanstack/react-query": { - "version": "5.66.0", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.66.0.tgz", - "integrity": "sha512-z3sYixFQJe8hndFnXgWu7C79ctL+pI0KAelYyW+khaNJ1m22lWrhJU2QrsTcRKMuVPtoZvfBYrTStIdKo+x0Xw==", + "version": "5.66.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.66.3.tgz", + "integrity": "sha512-sWMvxZ5VugPDgD1CzP7f0s9yFvjcXP3FXO5IVV2ndXlYqUCwykU8U69Kk05Qn5UvGRqB/gtj4J7vcTC6vtLHtQ==", "license": "MIT", "dependencies": { - "@tanstack/query-core": "5.66.0" + "@tanstack/query-core": "5.66.3" }, "funding": { "type": "github", @@ -6374,12 +6373,6 @@ "@types/estree": "*" } }, - "node_modules/@types/gensync": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@types/gensync/-/gensync-1.0.4.tgz", - "integrity": "sha512-C3YYeRQWp2fmq9OryX+FoDy8nXS6scQ7dPptD8LnFDAUNcKWJjXQKDNJD3HVm+kOUsXhTOkpi69vI4EuAr95bA==", - "license": "MIT" - }, "node_modules/@types/hast": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", @@ -6427,9 +6420,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.2.tgz", - "integrity": "sha512-Z+r8y3XL9ZpI2EY52YYygAFmo2/oWfNSj4BCpAXE2McAexDk8VcnBMGC9Djn9gTKt4d2T/hhXqmPzo4hfIXtTg==", + "version": "22.13.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.4.tgz", + "integrity": "sha512-ywP2X0DYtX3y08eFVx5fNIw7/uIv8hYUKgXoK8oayJlLnKcRfEYCxWMVE1XagUdVtCJlZT1AU4LXEABW+L1Peg==", "devOptional": true, "license": "MIT", "dependencies": { @@ -6437,18 +6430,18 @@ } }, "node_modules/@types/react": { - "version": "19.0.8", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.8.tgz", - "integrity": "sha512-9P/o1IGdfmQxrujGbIMDyYaaCykhLKc0NGCtYcECNUr9UAaDe4gwvV9bR6tvd5Br1SG0j+PBpbKr2UYY8CwqSw==", + "version": "19.0.10", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.10.tgz", + "integrity": "sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==", 
"license": "MIT", "dependencies": { "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "19.0.3", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.3.tgz", - "integrity": "sha512-0Knk+HJiMP/qOZgMyNFamlIjw9OFCsyC2ZbigmEEyXXixgre6IQpm/4V+r3qH4GC1JPvRJKInw+on2rV6YZLeA==", + "version": "19.0.4", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.4.tgz", + "integrity": "sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg==", "dev": true, "license": "MIT", "peerDependencies": { @@ -7776,9 +7769,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001699", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001699.tgz", - "integrity": "sha512-b+uH5BakXZ9Do9iK+CkDmctUSEqZl+SP056vc5usa0PL+ev5OHw003rZXcnjNDv3L8P5j6rwT6C0BPKSikW08w==", + "version": "1.0.30001700", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001700.tgz", + "integrity": "sha512-2S6XIXwaE7K7erT8dY+kLQcpa5ms63XlRkMkReXjle+kf6c5g38vyMl+Z5y8dSxOFDhcFe+nxnn261PLxBSQsQ==", "funding": [ { "type": "opencollective", @@ -7806,9 +7799,9 @@ } }, "node_modules/chai": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz", - "integrity": "sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", + "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", "dev": true, "license": "MIT", "dependencies": { @@ -8753,9 +8746,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.98", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.98.tgz", - "integrity": "sha512-bI/LbtRBxU2GzK7KK5xxFd2y9Lf9XguHooPYbcXWy6wUoT8NMnffsvRhPmSeUHLSDKAEtKuTaEtK4Ms15zkIEA==", + "version": "1.5.101", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.101.tgz", + "integrity": "sha512-L0ISiQrP/56Acgu4/i/kfPwWSgrzYZUnQrC0+QPFuhqlLP1Ir7qzPPDVS9BcKIyWTRU8+o6CC8dKw38tSWhYIA==", "license": "ISC" }, "node_modules/emoji-regex": { @@ -9028,7 +9021,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0", @@ -10160,13 +10152,14 @@ } }, "node_modules/form-data": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz", - "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", + "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", "mime-types": "^2.1.12" }, "engines": { @@ -10205,9 +10198,9 @@ } }, "node_modules/framer-motion": { - "version": "12.4.2", - "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.4.2.tgz", - "integrity": "sha512-pW307cQKjDqEuO1flEoIFf6TkuJRfKr+c7qsHAJhDo4368N/5U8/7WU8J+xhd9+gjmOgJfgp+46evxRRFM39dA==", + "version": "12.4.3", + "resolved": 
"https://registry.npmjs.org/framer-motion/-/framer-motion-12.4.3.tgz", + "integrity": "sha512-rsMeO7w3dKyNG09o3cGwSH49iHU+VgDmfSSfsX+wfkO3zDA6WWkh4sUsMXd155YROjZP+7FTIhDrBYfgZeHjKQ==", "license": "MIT", "dependencies": { "motion-dom": "^12.0.0", @@ -10645,7 +10638,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "dev": true, "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" @@ -10970,9 +10962,9 @@ } }, "node_modules/i18next-browser-languagedetector": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/i18next-browser-languagedetector/-/i18next-browser-languagedetector-8.0.2.tgz", - "integrity": "sha512-shBvPmnIyZeD2VU5jVGIOWP7u9qNG3Lj7mpaiPFpbJ3LVfHZJvVzKR4v1Cb91wAOFpNw442N+LGPzHOHsten2g==", + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/i18next-browser-languagedetector/-/i18next-browser-languagedetector-8.0.3.tgz", + "integrity": "sha512-beOOLArattPBc2YZG5IXGJytdYFgUR7cS8Wd6HT4IczIoWKgmTspOQ2yasaGklelVo5seLPmnEKvLHR+E/MdWQ==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.23.2" @@ -14542,9 +14534,9 @@ "license": "MIT" }, "node_modules/posthog-js": { - "version": "1.217.4", - "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.217.4.tgz", - "integrity": "sha512-ZIOb75F1pdMZl6e7C4mgH2accKArLA2RG3zMEjeils+3J/cylwgcr2Iw0QtzSLqQVvR7AFRRbXMZXUWsiB2zyA==", + "version": "1.219.0", + "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.219.0.tgz", + "integrity": "sha512-RnjtcjI4UYTBsjfF4Fs1lICWmGjiqMU9H0fN2ab1BEcDOFL/2m9Fx/1viCxvMiQR8cmgWWpkipJXD0gY7czDOA==", "license": "MIT", "dependencies": { "core-js": "^3.38.1", @@ -14560,9 +14552,9 @@ "license": "Apache-2.0" }, "node_modules/preact": { - "version": "10.25.4", - "resolved": "https://registry.npmjs.org/preact/-/preact-10.25.4.tgz", - "integrity": "sha512-jLdZDb+Q+odkHJ+MpW/9U5cODzqnB+fy2EiHSZES7ldV5LK7yjlVzTp7R8Xy6W6y75kfK8iWYtFVH7lvjwrCMA==", + "version": "10.26.0", + "resolved": "https://registry.npmjs.org/preact/-/preact-10.26.0.tgz", + "integrity": "sha512-6ugi/Mb7lyV5RA6KlnijFyDLMU253i7L0RRiObIzDoqj59KT9iTeNJbA/YGw6M7jP4vxaab0DOA8DgodTOA6EQ==", "license": "MIT", "funding": { "type": "opencollective", @@ -14874,9 +14866,9 @@ } }, "node_modules/react-hot-toast": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/react-hot-toast/-/react-hot-toast-2.5.1.tgz", - "integrity": "sha512-54Gq1ZD1JbmAb4psp9bvFHjS7lje+8ubboUmvKZkCsQBLH6AOpZ9JemfRvIdHcfb9AZXRaFLrb3qUobGYDJhFQ==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/react-hot-toast/-/react-hot-toast-2.5.2.tgz", + "integrity": "sha512-Tun3BbCxzmXXM7C+NI4qiv6lT0uwGh4oAfeJyNOjYUejTsm35mK9iCaYLGv8cBz9L5YxZLx/2ii7zsIwPtPUdw==", "license": "MIT", "dependencies": { "csstype": "^3.1.3", @@ -15086,9 +15078,9 @@ "license": "MIT" }, "node_modules/readdirp": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.1.tgz", - "integrity": "sha512-h80JrZu/MHUZCyHu5ciuoI0+WxsCxzxJTILn6Fs8rxSnFPh+UVHYfeIxK1nVGugMqkfC4vJcBOYbkfkwYK0+gw==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", "dev": true, "license": "MIT", "engines": { @@ -15537,9 +15529,9 @@ } }, "node_modules/rollup": { - "version": "4.34.6", - "resolved": 
"https://registry.npmjs.org/rollup/-/rollup-4.34.6.tgz", - "integrity": "sha512-wc2cBWqJgkU3Iz5oztRkQbfVkbxoz5EhnCGOrnJvnLnQ7O0WhQUYyv18qQI79O8L7DdHrrlJNeCHd4VGpnaXKQ==", + "version": "4.34.8", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.8.tgz", + "integrity": "sha512-489gTVMzAYdiZHFVA/ig/iYFllCcWFHMvUHI1rpFmkoUtRlQxqh6/yiNqnYibjMZ2b/+FUQwldG+aLsEt6bglQ==", "license": "MIT", "dependencies": { "@types/estree": "1.0.6" @@ -15552,25 +15544,25 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.34.6", - "@rollup/rollup-android-arm64": "4.34.6", - "@rollup/rollup-darwin-arm64": "4.34.6", - "@rollup/rollup-darwin-x64": "4.34.6", - "@rollup/rollup-freebsd-arm64": "4.34.6", - "@rollup/rollup-freebsd-x64": "4.34.6", - "@rollup/rollup-linux-arm-gnueabihf": "4.34.6", - "@rollup/rollup-linux-arm-musleabihf": "4.34.6", - "@rollup/rollup-linux-arm64-gnu": "4.34.6", - "@rollup/rollup-linux-arm64-musl": "4.34.6", - "@rollup/rollup-linux-loongarch64-gnu": "4.34.6", - "@rollup/rollup-linux-powerpc64le-gnu": "4.34.6", - "@rollup/rollup-linux-riscv64-gnu": "4.34.6", - "@rollup/rollup-linux-s390x-gnu": "4.34.6", - "@rollup/rollup-linux-x64-gnu": "4.34.6", - "@rollup/rollup-linux-x64-musl": "4.34.6", - "@rollup/rollup-win32-arm64-msvc": "4.34.6", - "@rollup/rollup-win32-ia32-msvc": "4.34.6", - "@rollup/rollup-win32-x64-msvc": "4.34.6", + "@rollup/rollup-android-arm-eabi": "4.34.8", + "@rollup/rollup-android-arm64": "4.34.8", + "@rollup/rollup-darwin-arm64": "4.34.8", + "@rollup/rollup-darwin-x64": "4.34.8", + "@rollup/rollup-freebsd-arm64": "4.34.8", + "@rollup/rollup-freebsd-x64": "4.34.8", + "@rollup/rollup-linux-arm-gnueabihf": "4.34.8", + "@rollup/rollup-linux-arm-musleabihf": "4.34.8", + "@rollup/rollup-linux-arm64-gnu": "4.34.8", + "@rollup/rollup-linux-arm64-musl": "4.34.8", + "@rollup/rollup-linux-loongarch64-gnu": "4.34.8", + "@rollup/rollup-linux-powerpc64le-gnu": "4.34.8", + "@rollup/rollup-linux-riscv64-gnu": "4.34.8", + "@rollup/rollup-linux-s390x-gnu": "4.34.8", + "@rollup/rollup-linux-x64-gnu": "4.34.8", + "@rollup/rollup-linux-x64-musl": "4.34.8", + "@rollup/rollup-win32-arm64-msvc": "4.34.8", + "@rollup/rollup-win32-ia32-msvc": "4.34.8", + "@rollup/rollup-win32-x64-msvc": "4.34.8", "fsevents": "~2.3.2" } }, @@ -17105,9 +17097,9 @@ } }, "node_modules/type-fest": { - "version": "4.34.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.34.1.tgz", - "integrity": "sha512-6kSc32kT0rbwxD6QL1CYe8IqdzN/J/ILMrNK+HMQCKH3insCDRY/3ITb0vcBss0a3t72fzh2YSzj8ko1HgwT3g==", + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.35.0.tgz", + "integrity": "sha512-2/AwEFQDFEy30iOLjrvHDIH7e4HEWH+f1Yl1bI5XMqzuoCUqwYCdxachgsgv0og/JdVZUhbfjcJAoHj5L1753A==", "dev": true, "license": "(MIT OR CC0-1.0)", "engines": { diff --git a/frontend/package.json b/frontend/package.json index 2781b75eabbc..6049777c09df 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -22,12 +22,12 @@ "eslint-config-airbnb-typescript": "^18.0.0", "framer-motion": "^12.4.2", "i18next": "^24.2.2", - "i18next-browser-languagedetector": "^8.0.2", + "i18next-browser-languagedetector": "^8.0.3", "i18next-http-backend": "^3.0.2", "isbot": "^5.1.22", "jose": "^5.9.4", "monaco-editor": "^0.52.2", - "posthog-js": "^1.217.2", + "posthog-js": "^1.217.6", "react": "^19.0.0", "react-dom": "^19.0.0", "react-highlight": "^0.15.0", @@ -85,7 +85,7 @@ "@testing-library/jest-dom": "^6.6.1", "@testing-library/react": "^16.2.0", 
"@testing-library/user-event": "^14.6.1", - "@types/node": "^22.13.2", + "@types/node": "^22.13.4", "@types/react": "^19.0.8", "@types/react-dom": "^19.0.3", "@types/react-highlight": "^0.12.8", From 0cbf50576d9b9b60b5722caf181a007f8c08ed61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E5=B8=88=E8=83=A1?= Date: Mon, 17 Feb 2025 22:11:32 +0800 Subject: [PATCH 22/44] docs(runtime): fix broken links of benchmarks (#6744) Co-authored-by: jianhao1 --- openhands/runtime/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openhands/runtime/README.md b/openhands/runtime/README.md index 5a4c1bd0f4fa..10df124e83a7 100644 --- a/openhands/runtime/README.md +++ b/openhands/runtime/README.md @@ -145,7 +145,7 @@ Key features: - Support for cloud-based deployments - Potential for improved security through isolation -At the time of this writing, this is mostly used in parallel evaluation, such as this example for [SWE-Bench](https://github.com/All-Hands-AI/OpenHands/tree/main/evaluation/swe_bench#run-inference-on-remoteruntime-experimental). +At the time of this writing, this is mostly used in parallel evaluation, such as this example for [SWE-Bench](https://github.com/All-Hands-AI/OpenHands/tree/main/evaluation/benchmarks/swe_bench#run-inference-on-remoteruntime-experimental). ## Related Components From 265e8ae5f4edd4f24f3742a26ce93baa82e8a05b Mon Sep 17 00:00:00 2001 From: tofarr Date: Mon, 17 Feb 2025 14:33:36 +0000 Subject: [PATCH 23/44] feat: implement optimistic updates for conversation deletion (#6745) Co-authored-by: openhands Co-authored-by: sp.wack <83104063+amanape@users.noreply.github.com> --- .../confirm-delete-modal.tsx | 5 +++- .../conversation-panel/conversation-card.tsx | 1 + .../conversation-panel/conversation-panel.tsx | 21 ++++++++++----- .../hooks/mutation/use-delete-conversation.ts | 27 ++++++++++++++++++- 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/frontend/src/components/features/conversation-panel/confirm-delete-modal.tsx b/frontend/src/components/features/conversation-panel/confirm-delete-modal.tsx index 4dd7c183be09..a649dc4ef878 100644 --- a/frontend/src/components/features/conversation-panel/confirm-delete-modal.tsx +++ b/frontend/src/components/features/conversation-panel/confirm-delete-modal.tsx @@ -22,7 +22,10 @@ export function ConfirmDeleteModal({
-
+
event.stopPropagation()} + > ) => { diff --git a/frontend/src/components/features/conversation-panel/conversation-panel.tsx b/frontend/src/components/features/conversation-panel/conversation-panel.tsx index a96c649a54d8..d91a70755ba1 100644 --- a/frontend/src/components/features/conversation-panel/conversation-panel.tsx +++ b/frontend/src/components/features/conversation-panel/conversation-panel.tsx @@ -44,12 +44,16 @@ export function ConversationPanel({ onClose }: ConversationPanelProps) { const handleConfirmDelete = () => { if (selectedConversationId) { - deleteConversation({ conversationId: selectedConversationId }); - setConfirmDeleteModalVisible(false); - - if (cid === selectedConversationId) { - endSession(); - } + deleteConversation( + { conversationId: selectedConversationId }, + { + onSuccess: () => { + if (cid === selectedConversationId) { + endSession(); + } + }, + }, + ); } }; @@ -110,7 +114,10 @@ export function ConversationPanel({ onClose }: ConversationPanelProps) { {confirmDeleteModalVisible && ( { + handleConfirmDelete(); + setConfirmDeleteModalVisible(false); + }} onCancel={() => setConfirmDeleteModalVisible(false)} /> )} diff --git a/frontend/src/hooks/mutation/use-delete-conversation.ts b/frontend/src/hooks/mutation/use-delete-conversation.ts index b0e3d6c90e58..cedc5475caae 100644 --- a/frontend/src/hooks/mutation/use-delete-conversation.ts +++ b/frontend/src/hooks/mutation/use-delete-conversation.ts @@ -7,7 +7,32 @@ export const useDeleteConversation = () => { return useMutation({ mutationFn: (variables: { conversationId: string }) => OpenHands.deleteUserConversation(variables.conversationId), - onSuccess: () => { + onMutate: async (variables) => { + await queryClient.cancelQueries({ queryKey: ["user", "conversations"] }); + const previousConversations = queryClient.getQueryData([ + "user", + "conversations", + ]); + + queryClient.setQueryData( + ["user", "conversations"], + (old: { conversation_id: string }[] | undefined) => + old?.filter( + (conv) => conv.conversation_id !== variables.conversationId, + ), + ); + + return { previousConversations }; + }, + onError: (err, variables, context) => { + if (context?.previousConversations) { + queryClient.setQueryData( + ["user", "conversations"], + context.previousConversations, + ); + } + }, + onSettled: () => { queryClient.invalidateQueries({ queryKey: ["user", "conversations"] }); }, }); From 82b5325792d547eb86adaf43811ca2833d248d13 Mon Sep 17 00:00:00 2001 From: tofarr Date: Mon, 17 Feb 2025 15:11:13 +0000 Subject: [PATCH 24/44] Added iterate method and additional tests for search functions (#6756) --- openhands/utils/search_utils.py | 14 +++ tests/unit/test_file_conversation_store.py | 122 +++++++++++++++++++++ tests/unit/test_search_utils.py | 115 ++++++++++++++++++- 3 files changed, 250 insertions(+), 1 deletion(-) diff --git a/openhands/utils/search_utils.py b/openhands/utils/search_utils.py index 315d0775c0c4..b7714249f875 100644 --- a/openhands/utils/search_utils.py +++ b/openhands/utils/search_utils.py @@ -1,4 +1,5 @@ import base64 +from typing import AsyncIterator, Callable def offset_to_page_id(offset: int, has_next: bool) -> str | None: @@ -13,3 +14,16 @@ def page_id_to_offset(page_id: str | None) -> int: return 0 offset = int(base64.b64decode(page_id).decode()) return offset + + +async def iterate(fn: Callable, **kwargs) -> AsyncIterator: + """Iterate over paged result sets. 
Assumes that the results sets contain an array of result objects, and a next_page_id""" + kwargs = {**kwargs} + kwargs['page_id'] = None + while True: + result_set = await fn(**kwargs) + for result in result_set.results: + yield result + if result_set.next_page_id is None: + return + kwargs['page_id'] = result_set.next_page_id diff --git a/tests/unit/test_file_conversation_store.py b/tests/unit/test_file_conversation_store.py index 323f20de7780..80c391dacafb 100644 --- a/tests/unit/test_file_conversation_store.py +++ b/tests/unit/test_file_conversation_store.py @@ -40,3 +40,125 @@ async def test_load_int_user_id(): ) found = await store.get_metadata('some-conversation-id') assert found.github_user_id == '12345' + + +@pytest.mark.asyncio +async def test_search_empty(): + store = FileConversationStore(InMemoryFileStore({})) + result = await store.search() + assert len(result.results) == 0 + assert result.next_page_id is None + + +@pytest.mark.asyncio +async def test_search_basic(): + # Create test data with 3 conversations at different dates + store = FileConversationStore( + InMemoryFileStore( + { + 'sessions/conv1/metadata.json': json.dumps( + { + 'conversation_id': 'conv1', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'First conversation', + 'created_at': '2025-01-16T19:51:04Z', + } + ), + 'sessions/conv2/metadata.json': json.dumps( + { + 'conversation_id': 'conv2', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'Second conversation', + 'created_at': '2025-01-17T19:51:04Z', + } + ), + 'sessions/conv3/metadata.json': json.dumps( + { + 'conversation_id': 'conv3', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'Third conversation', + 'created_at': '2025-01-15T19:51:04Z', + } + ), + } + ) + ) + + result = await store.search() + assert len(result.results) == 3 + # Should be sorted by date, newest first + assert result.results[0].conversation_id == 'conv2' + assert result.results[1].conversation_id == 'conv1' + assert result.results[2].conversation_id == 'conv3' + assert result.next_page_id is None + + +@pytest.mark.asyncio +async def test_search_pagination(): + # Create test data with 5 conversations + store = FileConversationStore( + InMemoryFileStore( + { + f'sessions/conv{i}/metadata.json': json.dumps( + { + 'conversation_id': f'conv{i}', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': f'Conversation {i}', + 'created_at': f'2025-01-{15+i}T19:51:04Z', + } + ) + for i in range(1, 6) + } + ) + ) + + # Test with limit of 2 + result = await store.search(limit=2) + assert len(result.results) == 2 + assert result.results[0].conversation_id == 'conv5' # newest first + assert result.results[1].conversation_id == 'conv4' + assert result.next_page_id is not None + + # Get next page using the next_page_id + result2 = await store.search(page_id=result.next_page_id, limit=2) + assert len(result2.results) == 2 + assert result2.results[0].conversation_id == 'conv3' + assert result2.results[1].conversation_id == 'conv2' + assert result2.next_page_id is not None + + # Get last page + result3 = await store.search(page_id=result2.next_page_id, limit=2) + assert len(result3.results) == 1 + assert result3.results[0].conversation_id == 'conv1' + assert result3.next_page_id is None + + +@pytest.mark.asyncio +async def test_search_with_invalid_conversation(): + # Test handling of invalid conversation data + store = FileConversationStore( + InMemoryFileStore( + { + 'sessions/conv1/metadata.json': json.dumps( + { + 
'conversation_id': 'conv1', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'Valid conversation', + 'created_at': '2025-01-16T19:51:04Z', + } + ), + 'sessions/conv2/metadata.json': 'invalid json', # Invalid conversation + } + ) + ) + + result = await store.search() + # Should return only the valid conversation + assert len(result.results) == 1 + assert result.results[0].conversation_id == 'conv1' + assert result.next_page_id is None diff --git a/tests/unit/test_search_utils.py b/tests/unit/test_search_utils.py index 3e68dbfab79f..d9e2830b6bf1 100644 --- a/tests/unit/test_search_utils.py +++ b/tests/unit/test_search_utils.py @@ -1,4 +1,10 @@ -from openhands.utils.search_utils import offset_to_page_id, page_id_to_offset +import json + +import pytest + +from openhands.storage.conversation.file_conversation_store import FileConversationStore +from openhands.storage.memory import InMemoryFileStore +from openhands.utils.search_utils import iterate, offset_to_page_id, page_id_to_offset def test_offset_to_page_id(): @@ -22,3 +28,110 @@ def test_bidirectional_conversion(): for offset in test_offsets: page_id = offset_to_page_id(offset, True) assert page_id_to_offset(page_id) == offset + + +@pytest.mark.asyncio +async def test_iterate_empty(): + store = FileConversationStore(InMemoryFileStore({})) + results = [] + async for result in iterate(store.search): + results.append(result) + assert len(results) == 0 + + +@pytest.mark.asyncio +async def test_iterate_single_page(): + store = FileConversationStore( + InMemoryFileStore( + { + 'sessions/conv1/metadata.json': json.dumps( + { + 'conversation_id': 'conv1', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'First conversation', + 'created_at': '2025-01-16T19:51:04Z', + } + ), + 'sessions/conv2/metadata.json': json.dumps( + { + 'conversation_id': 'conv2', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'Second conversation', + 'created_at': '2025-01-17T19:51:04Z', + } + ), + } + ) + ) + + results = [] + async for result in iterate(store.search): + results.append(result) + + assert len(results) == 2 + assert results[0].conversation_id == 'conv2' # newest first + assert results[1].conversation_id == 'conv1' + + +@pytest.mark.asyncio +async def test_iterate_multiple_pages(): + # Create test data with 5 conversations + store = FileConversationStore( + InMemoryFileStore( + { + f'sessions/conv{i}/metadata.json': json.dumps( + { + 'conversation_id': f'conv{i}', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': f'Conversation {i}', + 'created_at': f'2025-01-{15+i}T19:51:04Z', + } + ) + for i in range(1, 6) + } + ) + ) + + results = [] + async for result in iterate(store.search, limit=2): + results.append(result) + + assert len(results) == 5 + # Should be sorted by date, newest first + assert [r.conversation_id for r in results] == [ + 'conv5', + 'conv4', + 'conv3', + 'conv2', + 'conv1', + ] + + +@pytest.mark.asyncio +async def test_iterate_with_invalid_conversation(): + store = FileConversationStore( + InMemoryFileStore( + { + 'sessions/conv1/metadata.json': json.dumps( + { + 'conversation_id': 'conv1', + 'github_user_id': '123', + 'selected_repository': 'repo1', + 'title': 'Valid conversation', + 'created_at': '2025-01-16T19:51:04Z', + } + ), + 'sessions/conv2/metadata.json': 'invalid json', # Invalid conversation + } + ) + ) + + results = [] + async for result in iterate(store.search): + results.append(result) + + assert len(results) == 1 + assert 
results[0].conversation_id == 'conv1' From 3a478c23037383efedef617b476732e9da76fd25 Mon Sep 17 00:00:00 2001 From: Robert Brennan Date: Mon, 17 Feb 2025 10:36:59 -0500 Subject: [PATCH 25/44] Better LLM retry behavior (#6557) Co-authored-by: Engel Nyst --- docs/modules/usage/llms/llms.md | 14 ++++---- frontend/src/i18n/translation.json | 44 +++++++++++++++++++++++- openhands/controller/agent_controller.py | 11 ++++++ openhands/core/config/llm_config.py | 7 ++-- openhands/llm/llm.py | 14 +------- tests/unit/test_llm.py | 34 ------------------ tests/unit/test_llm_config.py | 2 +- 7 files changed, 67 insertions(+), 59 deletions(-) diff --git a/docs/modules/usage/llms/llms.md b/docs/modules/usage/llms/llms.md index 5e6a472d0c0a..f4fa118dd02e 100644 --- a/docs/modules/usage/llms/llms.md +++ b/docs/modules/usage/llms/llms.md @@ -63,22 +63,22 @@ We have a few guides for running OpenHands with specific model providers: ### API retries and rate limits LLM providers typically have rate limits, sometimes very low, and may require retries. OpenHands will automatically -retry requests if it receives a Rate Limit Error (429 error code), API connection error, or other transient errors. +retry requests if it receives a Rate Limit Error (429 error code). You can customize these options as you need for the provider you're using. Check their documentation, and set the following environment variables to control the number of retries and the time between retries: -- `LLM_NUM_RETRIES` (Default of 8) -- `LLM_RETRY_MIN_WAIT` (Default of 15 seconds) -- `LLM_RETRY_MAX_WAIT` (Default of 120 seconds) +- `LLM_NUM_RETRIES` (Default of 4 times) +- `LLM_RETRY_MIN_WAIT` (Default of 5 seconds) +- `LLM_RETRY_MAX_WAIT` (Default of 30 seconds) - `LLM_RETRY_MULTIPLIER` (Default of 2) If you are running OpenHands in development mode, you can also set these options in the `config.toml` file: ```toml [llm] -num_retries = 8 -retry_min_wait = 15 -retry_max_wait = 120 +num_retries = 4 +retry_min_wait = 5 +retry_max_wait = 30 retry_multiplier = 2 ``` diff --git a/frontend/src/i18n/translation.json b/frontend/src/i18n/translation.json index eaa0ccf43b8e..2cec18014f58 100644 --- a/frontend/src/i18n/translation.json +++ b/frontend/src/i18n/translation.json @@ -3803,6 +3803,37 @@ "pt": "Erro ao autenticar com o provedor LLM. Por favor, verifique sua chave API", "tr": "LLM sağlayıcısı ile kimlik doğrulama hatası. Lütfen API anahtarınızı kontrol edin" }, + "STATUS$ERROR_LLM_SERVICE_UNAVAILABLE": { + "en": "The LLM provider is currently unavailable. Please try again later.", + "es": "El proveedor LLM no está actualmente disponible. Por favor, inténtelo de nuevo más tarde.", + "zh-CN": "LLM提供商当前不可用", + "zh-TW": "LLM提供商目前無法使用", + "ko-KR": "LLM 공급자가 현재 사용 불가능합니다", + "ja": "LLMプロバイダーが現在利用できません。後でもう一度試してください。", + "no": "LLM-leverandøren er nå ikke tilgjengelig. Vennligst prøv igjen senere.", + "ar": "المزود LLM غير متاح حالياً. يرجى المحاولة مرة أخرى لاحقًا.", + "de": "Der LLM-Anbieter ist derzeit nicht verfügbar. Bitte versuchen Sie es später erneut.", + "fr": "Le fournisseur LLM n'est actuellement pas disponible. Veuillez réessayer plus tard.", + "it": "Il provider LLM non è attualmente disponibile. Per favore, riprova più tardi.", + "pt": "O provedor LLM não está atualmente disponível. Por favor, tente novamente mais tarde.", + "tr": "LLM sağlayıcısı şu anda kullanılamıyor. Lütfen daha sonra tekrar deneyin." 
+ }, + "STATUS$ERROR_LLM_INTERNAL_SERVER_ERROR": { + "en": "The request failed with an internal server error.", + "es": "La solicitud falló con un error del servidor interno.", + "zh-CN": "请求失败,请稍后再试", + "zh-TW": "請求失敗,請稍後再試", + "ko-KR": "요청이 실패했습니다. 나중에 다시 시도해주세요.", + "ja": "リクエストが内部サーバーエラーで失敗しました。後でもう一度試してください。", + "no": "Det oppstod en feil ved tilkobling til kjøretidsmiljøet. Vennligst oppdater siden.", + "ar": "حدث خطأ أثناء الاتصال بوقت التشغيل. يرجى تحديث الصفحة.", + "de": "Beim Verbinden mit der Laufzeitumgebung ist ein Fehler aufgetreten. Bitte aktualisieren Sie die Seite.", + "fr": "Une erreur s'est produite lors de la connexion à l'environnement d'exécution. Veuillez rafraîchir la page.", + "it": "Si è verificato un errore durante la connessione al runtime. Aggiorna la pagina.", + "pt": "Ocorreu um erro ao conectar ao ambiente de execução. Por favor, atualize a página.", + "tr": "Çalışma zamanına bağlanırken bir hata oluştu. Lütfen sayfayı yenileyin." + }, + "STATUS$ERROR_RUNTIME_DISCONNECTED": { "en": "There was an error while connecting to the runtime. Please refresh the page.", "zh-CN": "运行时已断开连接", @@ -3820,7 +3851,18 @@ }, "STATUS$LLM_RETRY": { "en": "Retrying LLM request", - "zh-TW": "重新嘗試 LLM 請求中" + "es": "Reintentando solicitud LLM", + "zh-CN": "重试LLM请求", + "zh-TW": "重試LLM請求", + "ko-KR": "LLM 요청 재시도", + "ja": "LLM リクエストを再試行中", + "no": "Gjenforsøker LLM-forespørsel", + "ar": "يتم إعادة تحميل الطلب LLM", + "de": "LLM-Anfrage erneut versuchen", + "fr": "Réessayer la requête LLM", + "it": "Ritenta la richiesta LLM", + "pt": "Reintentando a solicitação LLM", + "tr": "LLM isteğini yeniden deniyor" }, "AGENT_ERROR$BAD_ACTION": { "en": "Agent tried to execute a malformed action.", diff --git a/openhands/controller/agent_controller.py b/openhands/controller/agent_controller.py index e5a0b24f9694..1e338810198a 100644 --- a/openhands/controller/agent_controller.py +++ b/openhands/controller/agent_controller.py @@ -214,6 +214,17 @@ async def _react_to_exception( err_id = '' if isinstance(e, litellm.AuthenticationError): err_id = 'STATUS$ERROR_LLM_AUTHENTICATION' + elif isinstance( + e, + ( + litellm.ServiceUnavailableError, + litellm.APIConnectionError, + litellm.APIError, + ), + ): + err_id = 'STATUS$ERROR_LLM_SERVICE_UNAVAILABLE' + elif isinstance(e, litellm.InternalServerError): + err_id = 'STATUS$ERROR_LLM_INTERNAL_SERVER_ERROR' elif isinstance(e, RateLimitError): await self.set_agent_state_to(AgentState.RATE_LIMITED) return diff --git a/openhands/core/config/llm_config.py b/openhands/core/config/llm_config.py index cb1581634da1..cee22766df14 100644 --- a/openhands/core/config/llm_config.py +++ b/openhands/core/config/llm_config.py @@ -59,10 +59,11 @@ class LLMConfig(BaseModel): aws_region_name: str | None = Field(default=None) openrouter_site_url: str = Field(default='https://docs.all-hands.dev/') openrouter_app_name: str = Field(default='OpenHands') - num_retries: int = Field(default=8) + # total wait time: 5 + 10 + 20 + 30 = 65 seconds + num_retries: int = Field(default=4) retry_multiplier: float = Field(default=2) - retry_min_wait: int = Field(default=15) - retry_max_wait: int = Field(default=120) + retry_min_wait: int = Field(default=5) + retry_max_wait: int = Field(default=30) timeout: int | None = Field(default=None) max_message_chars: int = Field( default=30_000 diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py index b5fe67943467..a9071b43bed3 100644 --- a/openhands/llm/llm.py +++ b/openhands/llm/llm.py @@ -18,11 +18,7 @@ from litellm import completion as 
litellm_completion from litellm import completion_cost as litellm_completion_cost from litellm.exceptions import ( - APIConnectionError, - APIError, - InternalServerError, RateLimitError, - ServiceUnavailableError, ) from litellm.types.utils import CostPerToken, ModelResponse, Usage from litellm.utils import create_pretrained_tokenizer @@ -41,15 +37,7 @@ __all__ = ['LLM'] # tuple of exceptions to retry on -LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = ( - APIConnectionError, - # FIXME: APIError is useful on 502 from a proxy for example, - # but it also retries on other errors that are permanent - APIError, - InternalServerError, - RateLimitError, - ServiceUnavailableError, -) +LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (RateLimitError,) # cache prompt supporting models # remove this when we gemini and deepseek are supported diff --git a/tests/unit/test_llm.py b/tests/unit/test_llm.py index 98783c050d0a..1bfee8550698 100644 --- a/tests/unit/test_llm.py +++ b/tests/unit/test_llm.py @@ -3,10 +3,7 @@ import pytest from litellm.exceptions import ( - APIConnectionError, - InternalServerError, RateLimitError, - ServiceUnavailableError, ) from openhands.core.config import LLMConfig @@ -187,21 +184,6 @@ def test_completion_with_mocked_logger( @pytest.mark.parametrize( 'exception_class,extra_args,expected_retries', [ - ( - APIConnectionError, - {'llm_provider': 'test_provider', 'model': 'test_model'}, - 2, - ), - ( - InternalServerError, - {'llm_provider': 'test_provider', 'model': 'test_model'}, - 2, - ), - ( - ServiceUnavailableError, - {'llm_provider': 'test_provider', 'model': 'test_model'}, - 2, - ), (RateLimitError, {'llm_provider': 'test_provider', 'model': 'test_model'}, 2), ], ) @@ -254,22 +236,6 @@ def test_completion_rate_limit_wait_time(mock_litellm_completion, default_config ), f'Expected wait time between {default_config.retry_min_wait} and {default_config.retry_max_wait} seconds, but got {wait_time}' -@patch('openhands.llm.llm.litellm_completion') -def test_completion_exhausts_retries(mock_litellm_completion, default_config): - mock_litellm_completion.side_effect = APIConnectionError( - 'Persistent error', llm_provider='test_provider', model='test_model' - ) - - llm = LLM(config=default_config) - with pytest.raises(APIConnectionError): - llm.completion( - messages=[{'role': 'user', 'content': 'Hello!'}], - stream=False, - ) - - assert mock_litellm_completion.call_count == llm.config.num_retries - - @patch('openhands.llm.llm.litellm_completion') def test_completion_operation_cancelled(mock_litellm_completion, default_config): mock_litellm_completion.side_effect = OperationCancelled('Operation cancelled') diff --git a/tests/unit/test_llm_config.py b/tests/unit/test_llm_config.py index 342112a44316..fd11deb98580 100644 --- a/tests/unit/test_llm_config.py +++ b/tests/unit/test_llm_config.py @@ -188,7 +188,7 @@ def test_load_from_toml_llm_missing_generic( assert custom_only.model == 'custom-only-model' assert custom_only.api_key.get_secret_value() == 'custom-only-api-key' assert custom_only.embedding_model == 'local' # default value - assert custom_only.num_retries == 8 # default value + assert custom_only.num_retries == 4 # default value def test_load_from_toml_llm_invalid_config( From ae31a24c29373ccda9f098952081a4a439fbcbc1 Mon Sep 17 00:00:00 2001 From: Robert Brennan Date: Mon, 17 Feb 2025 11:14:19 -0500 Subject: [PATCH 26/44] Fix caps in status message (#6761) --- frontend/src/i18n/translation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/frontend/src/i18n/translation.json b/frontend/src/i18n/translation.json index 2cec18014f58..70b9974dcb3c 100644 --- a/frontend/src/i18n/translation.json +++ b/frontend/src/i18n/translation.json @@ -3249,7 +3249,7 @@ "ja": "非公開" }, "STATUS$STARTING_RUNTIME": { - "en": "Starting Runtime...", + "en": "Starting runtime...", "zh-CN": "启动运行时...", "zh-TW": "啟動執行時...", "de": "Laufzeitumgebung wird gestartet...", From f4b123f73b93bf75d5e29ed5d2f4c147c6314a35 Mon Sep 17 00:00:00 2001 From: tofarr Date: Mon, 17 Feb 2025 17:23:43 +0000 Subject: [PATCH 27/44] Improve SensitiveDataFilter and add comprehensive tests (#6755) Co-authored-by: openhands --- openhands/core/logger.py | 32 ++++++---- tests/unit/test_logger.py | 117 +++++++++++++++++++++++++++++++++++++ tests/unit/test_logging.py | 45 ++++++-------- 3 files changed, 155 insertions(+), 39 deletions(-) create mode 100644 tests/unit/test_logger.py diff --git a/openhands/core/logger.py b/openhands/core/logger.py index b384fedac1d8..2f830c655a00 100644 --- a/openhands/core/logger.py +++ b/openhands/core/logger.py @@ -217,7 +217,21 @@ def _flush(self): class SensitiveDataFilter(logging.Filter): def filter(self, record): - # start with attributes + # Gather sensitive values which should not ever appear in the logs. + sensitive_values = [] + for key, value in os.environ.items(): + key_upper = key.upper() + if len(value) > 2 and any( + s in key_upper for s in ('SECRET', 'KEY', 'CODE', 'TOKEN') + ): + sensitive_values.append(value) + + # Replace sensitive values from env! + msg = record.getMessage() + for sensitive_value in sensitive_values: + msg = msg.replace(sensitive_value, '******') + + # Replace obvious sensitive values from log itself... sensitive_patterns = [ 'api_key', 'aws_access_key_id', @@ -227,28 +241,22 @@ def filter(self, record): 'jwt_secret', 'modal_api_token_id', 'modal_api_token_secret', + 'llm_api_key', + 'sandbox_env_github_token', ] # add env var names env_vars = [attr.upper() for attr in sensitive_patterns] sensitive_patterns.extend(env_vars) - # and some special cases - sensitive_patterns.append('JWT_SECRET') - sensitive_patterns.append('LLM_API_KEY') - sensitive_patterns.append('GITHUB_TOKEN') - sensitive_patterns.append('SANDBOX_ENV_GITHUB_TOKEN') - - # this also formats the message with % args - msg = record.getMessage() - record.args = () - for attr in sensitive_patterns: pattern = rf"{attr}='?([\w-]+)'?" 
msg = re.sub(pattern, f"{attr}='******'", msg) - # passed with msg + # Update the record record.msg = msg + record.args = () + return True diff --git a/tests/unit/test_logger.py b/tests/unit/test_logger.py new file mode 100644 index 000000000000..78c5661bc823 --- /dev/null +++ b/tests/unit/test_logger.py @@ -0,0 +1,117 @@ +import logging +from unittest.mock import patch + +from openhands.core.logger import SensitiveDataFilter + + +@patch.dict( + 'os.environ', + { + 'API_SECRET': 'super-secret-123', + 'AUTH_TOKEN': 'auth-token-456', + 'NORMAL_VAR': 'normal-value', + }, + clear=True, +) +def test_sensitive_data_filter_basic(): + # Create a filter instance + filter = SensitiveDataFilter() + + # Create a log record with sensitive data + record = logging.LogRecord( + name='test_logger', + level=logging.INFO, + pathname='test.py', + lineno=1, + msg='API Secret: super-secret-123, Token: auth-token-456, Normal: normal-value', + args=(), + exc_info=None, + ) + + # Apply the filter + filter.filter(record) + + # Check that sensitive data is masked but normal data isn't + assert '******' in record.msg + assert 'super-secret-123' not in record.msg + assert 'auth-token-456' not in record.msg + assert 'normal-value' in record.msg + + +@patch.dict('os.environ', {}, clear=True) +def test_sensitive_data_filter_empty_values(): + # Test with empty environment variables + filter = SensitiveDataFilter() + + record = logging.LogRecord( + name='test_logger', + level=logging.INFO, + pathname='test.py', + lineno=1, + msg='No sensitive data here', + args=(), + exc_info=None, + ) + + # Apply the filter + filter.filter(record) + + # Message should remain unchanged + assert record.msg == 'No sensitive data here' + + +@patch.dict('os.environ', {'API_KEY': 'secret-key-789'}, clear=True) +def test_sensitive_data_filter_multiple_occurrences(): + # Test with multiple occurrences of the same sensitive data + filter = SensitiveDataFilter() + + # Create a message with multiple occurrences of the same sensitive data + record = logging.LogRecord( + name='test_logger', + level=logging.INFO, + pathname='test.py', + lineno=1, + msg='Key1: secret-key-789, Key2: secret-key-789', + args=(), + exc_info=None, + ) + + # Apply the filter + filter.filter(record) + + # Check that all occurrences are masked + assert record.msg.count('******') == 2 + assert 'secret-key-789' not in record.msg + + +@patch.dict( + 'os.environ', + { + 'secret_KEY': 'secret-value-1', + 'API_secret': 'secret-value-2', + 'TOKEN_code': 'secret-value-3', + }, + clear=True, +) +def test_sensitive_data_filter_case_sensitivity(): + # Test with different case variations in environment variable names + filter = SensitiveDataFilter() + + record = logging.LogRecord( + name='test_logger', + level=logging.INFO, + pathname='test.py', + lineno=1, + msg='Values: secret-value-1, secret-value-2, secret-value-3', + args=(), + exc_info=None, + ) + + # Apply the filter + filter.filter(record) + + # Check that all sensitive values are masked regardless of case + assert 'secret-value-1' not in record.msg + assert 'secret-value-2' not in record.msg + assert 'secret-value-3' not in record.msg + assert record.msg.count('******') == 3 diff --git a/tests/unit/test_logging.py b/tests/unit/test_logging.py index 5f5ef0b57974..e225313a0710 100644 --- a/tests/unit/test_logging.py +++ b/tests/unit/test_logging.py @@ -1,5 +1,6 @@ import logging from io import StringIO +from unittest.mock import patch import pytest @@ -26,7 +27,6 @@ def test_openai_api_key_masking(test_handler): message = f"OpenAI 
API key: api_key='{api_key}'and there's some stuff here" logger.info(message) log_output = stream.getvalue() - assert "api_key='******'" in log_output assert api_key not in log_output @@ -36,7 +36,6 @@ def test_azure_api_key_masking(test_handler): message = f"Azure API key: api_key='{api_key}' and chatty chat with ' and \" and '" logger.info(message) log_output = stream.getvalue() - assert "api_key='******'" in log_output assert api_key not in log_output @@ -46,7 +45,6 @@ def test_google_vertex_api_key_masking(test_handler): message = f"Google Vertex API key: api_key='{api_key}' or not" logger.info(message) log_output = stream.getvalue() - assert "api_key='******'" in log_output assert api_key not in log_output @@ -56,7 +54,6 @@ def test_anthropic_api_key_masking(test_handler): message = f"Anthropic API key: api_key='{api_key}' and there's some 'stuff' here" logger.info(message) log_output = stream.getvalue() - assert "api_key='******'" in log_output assert api_key not in log_output @@ -69,9 +66,6 @@ def test_llm_config_attributes_masking(test_handler): ) logger.info(f'LLM Config: {llm_config}') log_output = stream.getvalue() - assert "api_key='******'" in log_output - assert "aws_access_key_id='******'" in log_output - assert "aws_secret_access_key='******'" in log_output assert 'sk-abc123' not in log_output assert 'AKIAIOSFODNN7EXAMPLE' not in log_output assert 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY' not in log_output @@ -82,7 +76,6 @@ def test_app_config_attributes_masking(test_handler): app_config = AppConfig(e2b_api_key='e2b-xyz789') logger.info(f'App Config: {app_config}') log_output = stream.getvalue() - assert "e2b_api_key='******'" in log_output assert 'github_token' not in log_output assert 'e2b-xyz789' not in log_output assert 'ghp_abcdefghijklmnopqrstuvwxyz' not in log_output @@ -90,7 +83,7 @@ def test_app_config_attributes_masking(test_handler): def test_sensitive_env_vars_masking(test_handler): logger, stream = test_handler - sensitive_data = { + environ = { 'API_KEY': 'API_KEY_VALUE', 'AWS_ACCESS_KEY_ID': 'AWS_ACCESS_KEY_ID_VALUE', 'AWS_SECRET_ACCESS_KEY': 'AWS_SECRET_ACCESS_KEY_VALUE', @@ -99,31 +92,29 @@ def test_sensitive_env_vars_masking(test_handler): 'JWT_SECRET': 'JWT_SECRET_VALUE', } - log_message = ' '.join( - f"{attr}='{value}'" for attr, value in sensitive_data.items() - ) - logger.info(log_message) + with patch.dict('openhands.core.logger.os.environ', environ, clear=True): + log_message = ' '.join(f"{attr}='{value}'" for attr, value in environ.items()) + logger.info(log_message) - log_output = stream.getvalue() - for attr, value in sensitive_data.items(): - assert f"{attr}='******'" in log_output - assert value not in log_output + log_output = stream.getvalue() + for _, value in environ.items(): + assert value not in log_output def test_special_cases_masking(test_handler): logger, stream = test_handler - sensitive_data = { + environ = { 'LLM_API_KEY': 'LLM_API_KEY_VALUE', 'SANDBOX_ENV_GITHUB_TOKEN': 'SANDBOX_ENV_GITHUB_TOKEN_VALUE', } - log_message = ' '.join( - f"{attr}={value} with no single quotes' and something" - for attr, value in sensitive_data.items() - ) - logger.info(log_message) + with patch.dict('openhands.core.logger.os.environ', environ, clear=True): + log_message = ' '.join( + f"{attr}={value} with no single quotes' and something" + for attr, value in environ.items() + ) + logger.info(log_message) - log_output = stream.getvalue() - for attr, value in sensitive_data.items(): - assert f"{attr}='******'" in log_output - assert value not in 
log_output + log_output = stream.getvalue() + for attr, value in environ.items(): + assert value not in log_output From a7bb73ded235413604ce0d0dc5aa403c0952aded Mon Sep 17 00:00:00 2001 From: Xingyao Wang Date: Mon, 17 Feb 2025 12:53:02 -0500 Subject: [PATCH 28/44] fix: disable prlimit since limiting --vm breaks nodejs (#6765) --- openhands/runtime/utils/bash.py | 16 +-- tests/runtime/test_runtime_resource.py | 150 ++++++++++++------------- 2 files changed, 84 insertions(+), 82 deletions(-) diff --git a/openhands/runtime/utils/bash.py b/openhands/runtime/utils/bash.py index 419573d7546d..09ac30d19cc3 100644 --- a/openhands/runtime/utils/bash.py +++ b/openhands/runtime/utils/bash.py @@ -189,13 +189,15 @@ def initialize(self): if self.username in ['root', 'openhands']: # This starts a non-login (new) shell for the given user _shell_command = f'su {self.username} -' - # otherwise, we are running as the CURRENT USER (e.g., when running LocalRuntime) - if self.max_memory_mb is not None: - window_command = ( - f'prlimit --as={self.max_memory_mb * 1024 * 1024} {_shell_command}' - ) - else: - window_command = _shell_command + + # FIXME: we will introduce memory limit using sysbox-runc in coming PR + # # otherwise, we are running as the CURRENT USER (e.g., when running LocalRuntime) + # if self.max_memory_mb is not None: + # window_command = ( + # f'prlimit --as={self.max_memory_mb * 1024 * 1024} {_shell_command}' + # ) + # else: + window_command = _shell_command logger.debug(f'Initializing bash session with command: {window_command}') session_name = f'openhands-{self.username}-{uuid.uuid4()}' diff --git a/tests/runtime/test_runtime_resource.py b/tests/runtime/test_runtime_resource.py index 2873939f132d..950f37a3f3b2 100644 --- a/tests/runtime/test_runtime_resource.py +++ b/tests/runtime/test_runtime_resource.py @@ -36,78 +36,78 @@ def test_stress_docker_runtime(temp_dir, runtime_cls, repeat=1): _close_test_runtime(runtime) -def test_stress_docker_runtime_hit_memory_limits(temp_dir, runtime_cls): - """Test runtime behavior under resource constraints.""" - runtime, config = _load_runtime( - temp_dir, - runtime_cls, - docker_runtime_kwargs={ - 'cpu_period': 100000, # 100ms - 'cpu_quota': 100000, # Can use 100ms out of each 100ms period (1 CPU) - 'mem_limit': '4G', # 4 GB of memory - 'memswap_limit': '0', # No swap - 'mem_swappiness': 0, # Disable swapping - 'oom_kill_disable': False, # Enable OOM killer - }, - runtime_startup_env_vars={ - 'RUNTIME_MAX_MEMORY_GB': '3', - }, - ) - - action = CmdRunAction( - command='sudo apt-get update && sudo apt-get install -y stress-ng' - ) - logger.info(action, extra={'msg_type': 'ACTION'}) - obs = runtime.run_action(action) - logger.info(obs, extra={'msg_type': 'OBSERVATION'}) - assert obs.exit_code == 0 - - action = CmdRunAction( - command='stress-ng --vm 1 --vm-bytes 6G --timeout 30s --metrics' - ) - action.set_hard_timeout(120) - logger.info(action, extra={'msg_type': 'ACTION'}) - obs = runtime.run_action(action) - logger.info(obs, extra={'msg_type': 'OBSERVATION'}) - assert 'aborted early, out of system resources' in obs.content - assert obs.exit_code == 3 # OOM killed! 
- - _close_test_runtime(runtime) - - -def test_stress_docker_runtime_within_memory_limits(temp_dir, runtime_cls): - """Test runtime behavior under resource constraints.""" - runtime, config = _load_runtime( - temp_dir, - runtime_cls, - docker_runtime_kwargs={ - 'cpu_period': 100000, # 100ms - 'cpu_quota': 100000, # Can use 100ms out of each 100ms period (1 CPU) - 'mem_limit': '4G', # 4 GB of memory - 'memswap_limit': '0', # No swap - 'mem_swappiness': 0, # Disable swapping - 'oom_kill_disable': False, # Enable OOM killer - }, - runtime_startup_env_vars={ - 'RUNTIME_MAX_MEMORY_GB': '7', - }, - ) - - action = CmdRunAction( - command='sudo apt-get update && sudo apt-get install -y stress-ng' - ) - logger.info(action, extra={'msg_type': 'ACTION'}) - obs = runtime.run_action(action) - logger.info(obs, extra={'msg_type': 'OBSERVATION'}) - assert obs.exit_code == 0 - - action = CmdRunAction( - command='stress-ng --vm 1 --vm-bytes 6G --timeout 30s --metrics' - ) - action.set_hard_timeout(120) - logger.info(action, extra={'msg_type': 'ACTION'}) - obs = runtime.run_action(action) - logger.info(obs, extra={'msg_type': 'OBSERVATION'}) - assert obs.exit_code == 0 - - _close_test_runtime(runtime) +# def test_stress_docker_runtime_hit_memory_limits(temp_dir, runtime_cls): +# """Test runtime behavior under resource constraints.""" +# runtime, config = _load_runtime( +# temp_dir, +# runtime_cls, +# docker_runtime_kwargs={ +# 'cpu_period': 100000, # 100ms +# 'cpu_quota': 100000, # Can use 100ms out of each 100ms period (1 CPU) +# 'mem_limit': '4G', # 4 GB of memory +# 'memswap_limit': '0', # No swap +# 'mem_swappiness': 0, # Disable swapping +# 'oom_kill_disable': False, # Enable OOM killer +# }, +# runtime_startup_env_vars={ +# 'RUNTIME_MAX_MEMORY_GB': '3', +# }, +# ) + +# action = CmdRunAction( +# command='sudo apt-get update && sudo apt-get install -y stress-ng' +# ) +# logger.info(action, extra={'msg_type': 'ACTION'}) +# obs = runtime.run_action(action) +# logger.info(obs, extra={'msg_type': 'OBSERVATION'}) +# assert obs.exit_code == 0 + +# action = CmdRunAction( +# command='stress-ng --vm 1 --vm-bytes 6G --timeout 30s --metrics' +# ) +# action.set_hard_timeout(120) +# logger.info(action, extra={'msg_type': 'ACTION'}) +# obs = runtime.run_action(action) +# logger.info(obs, extra={'msg_type': 'OBSERVATION'}) +# assert 'aborted early, out of system resources' in obs.content +# assert obs.exit_code == 3 # OOM killed! 
+ +# _close_test_runtime(runtime) + + +# def test_stress_docker_runtime_within_memory_limits(temp_dir, runtime_cls): +# """Test runtime behavior under resource constraints.""" +# runtime, config = _load_runtime( +# temp_dir, +# runtime_cls, +# docker_runtime_kwargs={ +# 'cpu_period': 100000, # 100ms +# 'cpu_quota': 100000, # Can use 100ms out of each 100ms period (1 CPU) +# 'mem_limit': '4G', # 4 GB of memory +# 'memswap_limit': '0', # No swap +# 'mem_swappiness': 0, # Disable swapping +# 'oom_kill_disable': False, # Enable OOM killer +# }, +# runtime_startup_env_vars={ +# 'RUNTIME_MAX_MEMORY_GB': '7', +# }, +# ) + +# action = CmdRunAction( +# command='sudo apt-get update && sudo apt-get install -y stress-ng' +# ) +# logger.info(action, extra={'msg_type': 'ACTION'}) +# obs = runtime.run_action(action) +# logger.info(obs, extra={'msg_type': 'OBSERVATION'}) +# assert obs.exit_code == 0 + +# action = CmdRunAction( +# command='stress-ng --vm 1 --vm-bytes 6G --timeout 30s --metrics' +# ) +# action.set_hard_timeout(120) +# logger.info(action, extra={'msg_type': 'ACTION'}) +# obs = runtime.run_action(action) +# logger.info(obs, extra={'msg_type': 'OBSERVATION'}) +# assert obs.exit_code == 0 + +# _close_test_runtime(runtime) From 57391d6e66c52baf82a602fde6b4108ae6821770 Mon Sep 17 00:00:00 2001 From: tofarr Date: Mon, 17 Feb 2025 18:07:47 +0000 Subject: [PATCH 29/44] Enable the multi conversation UI for all users (#6374) --- frontend/__tests__/routes/_oh.app.test.tsx | 3 +-- .../components/features/sidebar/sidebar.tsx | 19 ++++++++----------- frontend/src/routes/_oh.app/route.tsx | 3 +-- frontend/src/utils/feature-flags.ts | 1 - frontend/tests/conversation-panel.test.ts | 3 --- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/frontend/__tests__/routes/_oh.app.test.tsx b/frontend/__tests__/routes/_oh.app.test.tsx index 4fc96b25d3e5..d809b128ce08 100644 --- a/frontend/__tests__/routes/_oh.app.test.tsx +++ b/frontend/__tests__/routes/_oh.app.test.tsx @@ -5,7 +5,6 @@ import { screen, waitFor } from "@testing-library/react"; import toast from "react-hot-toast"; import App from "#/routes/_oh.app/route"; import OpenHands from "#/api/open-hands"; -import { MULTI_CONVERSATION_UI } from "#/utils/feature-flags"; describe("App", () => { const RouteStub = createRoutesStub([ @@ -35,7 +34,7 @@ describe("App", () => { await screen.findByTestId("app-route"); }); - it.skipIf(!MULTI_CONVERSATION_UI)( + it( "should call endSession if the user does not have permission to view conversation", async () => { const errorToastSpy = vi.spyOn(toast, "error"); diff --git a/frontend/src/components/features/sidebar/sidebar.tsx b/frontend/src/components/features/sidebar/sidebar.tsx index 645543ac6fd2..d44ca00e7153 100644 --- a/frontend/src/components/features/sidebar/sidebar.tsx +++ b/frontend/src/components/features/sidebar/sidebar.tsx @@ -15,7 +15,6 @@ import { SettingsModal } from "#/components/shared/modals/settings/settings-moda import { useCurrentSettings } from "#/context/settings-context"; import { useSettings } from "#/hooks/query/use-settings"; import { ConversationPanel } from "../conversation-panel/conversation-panel"; -import { MULTI_CONVERSATION_UI } from "#/utils/feature-flags"; import { useEndSession } from "#/hooks/use-end-session"; import { setCurrentAgentState } from "#/state/agent-slice"; import { AgentState } from "#/types/agent-state"; @@ -78,16 +77,14 @@ export function Sidebar() {
- {MULTI_CONVERSATION_UI && ( - setConversationPanelIsOpen((prev) => !prev)} - > - - - )} + setConversationPanelIsOpen((prev) => !prev)} + > + +
diff --git a/frontend/src/routes/_oh.app/route.tsx b/frontend/src/routes/_oh.app/route.tsx index 19450e09ed75..c605927a0aa9 100644 --- a/frontend/src/routes/_oh.app/route.tsx +++ b/frontend/src/routes/_oh.app/route.tsx @@ -34,7 +34,6 @@ import { useUserConversation } from "#/hooks/query/use-user-conversation"; import { ServedAppLabel } from "#/components/layout/served-app-label"; import { TerminalStatusLabel } from "#/components/features/terminal/terminal-status-label"; import { useSettings } from "#/hooks/query/use-settings"; -import { MULTI_CONVERSATION_UI } from "#/utils/feature-flags"; import { clearFiles, clearInitialPrompt } from "#/state/initial-query-slice"; import { RootState } from "#/store"; @@ -66,7 +65,7 @@ function AppContent() { ); React.useEffect(() => { - if (MULTI_CONVERSATION_UI && isFetched && !conversation) { + if (isFetched && !conversation) { toast.error( "This conversation does not exist, or you do not have permission to access it.", ); diff --git a/frontend/src/utils/feature-flags.ts b/frontend/src/utils/feature-flags.ts index 154e88a59924..a5f32b1128e7 100644 --- a/frontend/src/utils/feature-flags.ts +++ b/frontend/src/utils/feature-flags.ts @@ -12,5 +12,4 @@ function loadFeatureFlag( } } -export const MULTI_CONVERSATION_UI = loadFeatureFlag("MULTI_CONVERSATION_UI"); export const MEMORY_CONDENSER = loadFeatureFlag("MEMORY_CONDENSER"); diff --git a/frontend/tests/conversation-panel.test.ts b/frontend/tests/conversation-panel.test.ts index a4ef6ca6ea33..6e3f58cd458f 100644 --- a/frontend/tests/conversation-panel.test.ts +++ b/frontend/tests/conversation-panel.test.ts @@ -31,9 +31,6 @@ const selectConversationCard = async (page: Page, index: number) => { test.beforeEach(async ({ page }) => { await page.goto("/"); - await page.evaluate(() => { - localStorage.setItem("FEATURE_MULTI_CONVERSATION_UI", "true"); - }); }); test("should only display the create new conversation button when in a conversation", async ({ From 14ee6d7afef94ebf320746ecdc596231b7e139d4 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Mon, 17 Feb 2025 13:27:32 -0500 Subject: [PATCH 30/44] hotfix(Secrets): Add event stream filter for refreshed secret (#6764) --- openhands/events/stream.py | 3 +++ openhands/runtime/base.py | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/openhands/events/stream.py b/openhands/events/stream.py index 5e02c4c369dd..0fc547803f6d 100644 --- a/openhands/events/stream.py +++ b/openhands/events/stream.py @@ -282,6 +282,9 @@ def add_event(self, event: Event, source: EventSource): def set_secrets(self, secrets: dict[str, str]): self.secrets = secrets.copy() + def update_secrets(self, secrets: dict[str, str]): + self.secrets.update(secrets) + def _replace_secrets(self, data: dict) -> dict: for key in data: if isinstance(data[key], dict): diff --git a/openhands/runtime/base.py b/openhands/runtime/base.py index 4f1e37f471dc..983fc67fa898 100644 --- a/openhands/runtime/base.py +++ b/openhands/runtime/base.py @@ -225,6 +225,13 @@ async def _handle_action(self, event: Action) -> None: export_cmd = CmdRunAction( f"export GITHUB_TOKEN='{token.get_secret_value()}'" ) + + self.event_stream.update_secrets( + { + 'github_token': token.get_secret_value(), + } + ) + await call_sync_from_async(self.run, export_cmd) observation: Observation = await call_sync_from_async( From ce42e221055403e1cd6c674ff2c2a62829c97cc6 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Mon, 17 Feb 2025 13:39:49 -0500 Subject: [PATCH 31/44] [Docs]: Cloud Openhands (#6747) Co-authored-by: openhands 
Co-authored-by: mamoodi --- .../usage/cloud/cloud-github-resolver.md | 21 +++++++++++++ docs/modules/usage/cloud/openhands-cloud.md | 31 +++++++++++++++++++ docs/sidebars.ts | 21 +++++++++++-- 3 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 docs/modules/usage/cloud/cloud-github-resolver.md create mode 100644 docs/modules/usage/cloud/openhands-cloud.md diff --git a/docs/modules/usage/cloud/cloud-github-resolver.md b/docs/modules/usage/cloud/cloud-github-resolver.md new file mode 100644 index 000000000000..946bd1ba78d8 --- /dev/null +++ b/docs/modules/usage/cloud/cloud-github-resolver.md @@ -0,0 +1,21 @@ +# Cloud GitHub Resolver + +The GitHub Resolver automates code fixes and provides intelligent assistance for your repositories. + +## Setup + +The Cloud GitHub Resolver is available automatically when you +[grant OpenHands Cloud repository access](./openhands-cloud.md#adding-repositories). + +## Usage + +### Issues + +On your repository, label an issue with `openhands`. OpenHands will attempt to fix the issue. + +### Pull Requests + +In order to get OpenHands to work on pull requests, use `@openhands` in top-level or single inline comments to: - Ask questions - Request updates - Get code explanations diff --git a/docs/modules/usage/cloud/openhands-cloud.md b/docs/modules/usage/cloud/openhands-cloud.md new file mode 100644 index 000000000000..25a9ad2cb09f --- /dev/null +++ b/docs/modules/usage/cloud/openhands-cloud.md @@ -0,0 +1,31 @@ +# Openhands Cloud + +This document provides information about the hosted version of OpenHands. + +## Getting Started + +After visiting OpenHands Cloud, you will be asked to connect with your GitHub account: +1. After reading and accepting the terms of service, click `Connect to GitHub`. +2. Then click `Authorize OpenHands by All Hands AI`. + - Openhands requests short-lived tokens (8-hour expiry) with these permissions: + - Actions: Read and write + - Administration: Read-only + - Commit statuses: Read and write + - Contents: Read and write + - Issues: Read and write + - Metadata: Read-only + - Pull requests: Read and write + - Webhooks: Read and write + - Workflows: Read and write + +## Adding Repositories + +You can grant OpenHands specific repository access: +1. Under the `Select a GitHub project` dropdown, select `Add more repositories...`. +2. Select the organization, then choose the specific repositories to grant OpenHands access to. + - Repository access for a user is granted based on: + - Granted permission for the repository. + - User's GitHub permissions (owner/collaborator). + +You can manage repository access any time by following the above workflow or visiting the Settings page and selecting +`Configure GitHub Repositories` under the `GitHub Settings` section.
diff --git a/docs/sidebars.ts b/docs/sidebars.ts index a8d88d9dfca1..da416ac30b91 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -42,7 +42,7 @@ const sidebars: SidebarsConfig = { id: 'usage/prompting/microagents-public', }, ], - } + }, ], }, { @@ -69,6 +69,23 @@ const sidebars: SidebarsConfig = { label: 'Github Actions', id: 'usage/how-to/github-action', }, + { + type: 'category', + label: 'Cloud', + items: [ + { + type: 'doc', + label: 'Openhands Cloud', + id: 'usage/cloud/openhands-cloud', + }, + + { + type: 'doc', + label: 'Cloud GitHub Resolver', + id: 'usage/cloud/cloud-github-resolver', + }, + ], + }, ], }, { @@ -185,7 +202,7 @@ const sidebars: SidebarsConfig = { type: 'doc', label: 'About', id: 'usage/about', - } + }, ], }; From 07fcb786af7c5d588b83d040b84f4a8bb5736010 Mon Sep 17 00:00:00 2001 From: Graham Neubig Date: Mon, 17 Feb 2025 14:14:26 -0500 Subject: [PATCH 32/44] Upgrade tree sitter (#6740) Co-authored-by: Engel Nyst --- poetry.lock | 173 ++++++++++++++++++++++++++++++++++++------------- pyproject.toml | 4 +- 2 files changed, 131 insertions(+), 46 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5100a72f3949..91bfdbb72fc6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -5880,14 +5880,14 @@ realtime = ["websockets (>=13,<15)"] [[package]] name = "openhands-aci" -version = "0.2.2" +version = "0.2.3" description = "An Agent-Computer Interface (ACI) designed for software development agents OpenHands." optional = false python-versions = "<4.0,>=3.12" groups = ["main"] files = [ - {file = "openhands_aci-0.2.2-py3-none-any.whl", hash = "sha256:fdcea74d5760b7f936e532dec2923f06d6ba67b13312e2d91d230e751aa255f1"}, - {file = "openhands_aci-0.2.2.tar.gz", hash = "sha256:947d6c42d4d439200d0bda4748ee8bf5f0c517e8ee554d1c819b82f1d38536c6"}, + {file = "openhands_aci-0.2.3-py3-none-any.whl", hash = "sha256:6c5479c41f3bad460a0ac078418260851166a8d4e641e0072b26459a34ef4442"}, + {file = "openhands_aci-0.2.3.tar.gz", hash = "sha256:6b8031751ec3e6d1da54969b2ec19dcbc3192676e5386cbb48d04458f8021148"}, ] [package.dependencies] @@ -5901,7 +5901,11 @@ networkx = "*" numpy = "*" pandas = "*" scipy = "*" -tree-sitter = "0.21.3" +tree-sitter = ">=0.24.0,<0.25.0" +tree-sitter-javascript = ">=0.23.1,<0.24.0" +tree-sitter-python = ">=0.23.6,<0.24.0" +tree-sitter-ruby = ">=0.23.1,<0.24.0" +tree-sitter-typescript = ">=0.23.2,<0.24.0" whatthepatch = ">=1.0.6,<2.0.0" [[package]] @@ -9483,50 +9487,68 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "tree-sitter" -version = "0.21.3" -description = "Python bindings for the Tree-Sitter parsing library" +version = "0.24.0" +description = "Python bindings to the Tree-sitter parsing library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "tree-sitter-0.24.0.tar.gz", hash = "sha256:abd95af65ca2f4f7eca356343391ed669e764f37748b5352946f00f7fc78e734"}, + {file = "tree_sitter-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f3f00feff1fc47a8e4863561b8da8f5e023d382dd31ed3e43cd11d4cae445445"}, + {file = "tree_sitter-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f9691be48d98c49ef8f498460278884c666b44129222ed6217477dffad5d4831"}, + {file = "tree_sitter-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:098a81df9f89cf254d92c1cd0660a838593f85d7505b28249216661d87adde4a"}, + {file = "tree_sitter-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0b26bf9e958da6eb7e74a081aab9d9c7d05f9baeaa830dbb67481898fd16f1f5"}, + {file = "tree_sitter-0.24.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2a84ff87a2f2a008867a1064aba510ab3bd608e3e0cd6e8fef0379efee266c73"}, + {file = "tree_sitter-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:c012e4c345c57a95d92ab5a890c637aaa51ab3b7ff25ed7069834b1087361c95"}, + {file = "tree_sitter-0.24.0-cp310-cp310-win_arm64.whl", hash = "sha256:033506c1bc2ba7bd559b23a6bdbeaf1127cee3c68a094b82396718596dfe98bc"}, + {file = "tree_sitter-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de0fb7c18c6068cacff46250c0a0473e8fc74d673e3e86555f131c2c1346fb13"}, + {file = "tree_sitter-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7c9c89666dea2ce2b2bf98e75f429d2876c569fab966afefdcd71974c6d8538"}, + {file = "tree_sitter-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddb113e6b8b3e3b199695b1492a47d87d06c538e63050823d90ef13cac585fd"}, + {file = "tree_sitter-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01ea01a7003b88b92f7f875da6ba9d5d741e0c84bb1bd92c503c0eecd0ee6409"}, + {file = "tree_sitter-0.24.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:464fa5b2cac63608915a9de8a6efd67a4da1929e603ea86abaeae2cb1fe89921"}, + {file = "tree_sitter-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:3b1f3cbd9700e1fba0be2e7d801527e37c49fc02dc140714669144ef6ab58dce"}, + {file = "tree_sitter-0.24.0-cp311-cp311-win_arm64.whl", hash = "sha256:f3f08a2ca9f600b3758792ba2406971665ffbad810847398d180c48cee174ee2"}, + {file = "tree_sitter-0.24.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:14beeff5f11e223c37be7d5d119819880601a80d0399abe8c738ae2288804afc"}, + {file = "tree_sitter-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:26a5b130f70d5925d67b47db314da209063664585a2fd36fa69e0717738efaf4"}, + {file = "tree_sitter-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fc5c3c26d83c9d0ecb4fc4304fba35f034b7761d35286b936c1db1217558b4e"}, + {file = "tree_sitter-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:772e1bd8c0931c866b848d0369b32218ac97c24b04790ec4b0e409901945dd8e"}, + {file = "tree_sitter-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:24a8dd03b0d6b8812425f3b84d2f4763322684e38baf74e5bb766128b5633dc7"}, + {file = "tree_sitter-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9e8b1605ab60ed43803100f067eed71b0b0e6c1fb9860a262727dbfbbb74751"}, + {file = "tree_sitter-0.24.0-cp312-cp312-win_arm64.whl", hash = "sha256:f733a83d8355fc95561582b66bbea92ffd365c5d7a665bc9ebd25e049c2b2abb"}, + {file = "tree_sitter-0.24.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0d4a6416ed421c4210f0ca405a4834d5ccfbb8ad6692d4d74f7773ef68f92071"}, + {file = "tree_sitter-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e0992d483677e71d5c5d37f30dfb2e3afec2f932a9c53eec4fca13869b788c6c"}, + {file = "tree_sitter-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57277a12fbcefb1c8b206186068d456c600dbfbc3fd6c76968ee22614c5cd5ad"}, + {file = "tree_sitter-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25fa22766d63f73716c6fec1a31ee5cf904aa429484256bd5fdf5259051ed74"}, + {file = "tree_sitter-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7d5d9537507e1c8c5fa9935b34f320bfec4114d675e028f3ad94f11cf9db37b9"}, + {file = "tree_sitter-0.24.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:f58bb4956917715ec4d5a28681829a8dad5c342cafd4aea269f9132a83ca9b34"}, + {file = "tree_sitter-0.24.0-cp313-cp313-win_arm64.whl", hash = "sha256:23641bd25dcd4bb0b6fa91b8fb3f46cc9f1c9f475efe4d536d3f1f688d1b84c8"}, +] + +[package.extras] +docs = ["sphinx (>=8.1,<9.0)", "sphinx-book-theme"] +tests = ["tree-sitter-html (>=0.23.2)", "tree-sitter-javascript (>=0.23.1)", "tree-sitter-json (>=0.24.8)", "tree-sitter-python (>=0.23.6)", "tree-sitter-rust (>=0.23.2)"] + +[[package]] +name = "tree-sitter-javascript" +version = "0.23.1" +description = "JavaScript grammar for tree-sitter" +optional = false +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "tree-sitter-0.21.3.tar.gz", hash = "sha256:b5de3028921522365aa864d95b3c41926e0ba6a85ee5bd000e10dc49b0766988"}, - {file = "tree_sitter-0.21.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:351f302b6615230c9dac9829f0ba20a94362cd658206ca9a7b2d58d73373dfb0"}, - {file = "tree_sitter-0.21.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:766e79ae1e61271e7fdfecf35b6401ad9b47fc07a0965ad78e7f97fddfdf47a6"}, - {file = "tree_sitter-0.21.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c4d3d4d4b44857e87de55302af7f2d051c912c466ef20e8f18158e64df3542a"}, - {file = "tree_sitter-0.21.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84eedb06615461b9e2847be7c47b9c5f2195d7d66d31b33c0a227eff4e0a0199"}, - {file = "tree_sitter-0.21.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9d33ea425df8c3d6436926fe2991429d59c335431bf4e3c71e77c17eb508be5a"}, - {file = "tree_sitter-0.21.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fae1ee0ff6d85e2fd5cd8ceb9fe4af4012220ee1e4cbe813305a316caf7a6f63"}, - {file = "tree_sitter-0.21.3-cp310-cp310-win_amd64.whl", hash = "sha256:bb41be86a987391f9970571aebe005ccd10222f39c25efd15826583c761a37e5"}, - {file = "tree_sitter-0.21.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:54b22c3c2aab3e3639a4b255d9df8455da2921d050c4829b6a5663b057f10db5"}, - {file = "tree_sitter-0.21.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab6e88c1e2d5e84ff0f9e5cd83f21b8e5074ad292a2cf19df3ba31d94fbcecd4"}, - {file = "tree_sitter-0.21.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3fd34ed4cd5db445bc448361b5da46a2a781c648328dc5879d768f16a46771"}, - {file = "tree_sitter-0.21.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fabc7182f6083269ce3cfcad202fe01516aa80df64573b390af6cd853e8444a1"}, - {file = "tree_sitter-0.21.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f874c3f7d2a2faf5c91982dc7d88ff2a8f183a21fe475c29bee3009773b0558"}, - {file = "tree_sitter-0.21.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ee61ee3b7a4eedf9d8f1635c68ba4a6fa8c46929601fc48a907c6cfef0cfbcb2"}, - {file = "tree_sitter-0.21.3-cp311-cp311-win_amd64.whl", hash = "sha256:0b7256c723642de1c05fbb776b27742204a2382e337af22f4d9e279d77df7aa2"}, - {file = "tree_sitter-0.21.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:669b3e5a52cb1e37d60c7b16cc2221c76520445bb4f12dd17fd7220217f5abf3"}, - {file = "tree_sitter-0.21.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2aa2a5099a9f667730ff26d57533cc893d766667f4d8a9877e76a9e74f48f0d3"}, - {file = "tree_sitter-0.21.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3e06ae2a517cf6f1abb682974f76fa760298e6d5a3ecf2cf140c70f898adf0"}, - {file = "tree_sitter-0.21.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:af992dfe08b4fefcfcdb40548d0d26d5d2e0a0f2d833487372f3728cd0772b48"}, - {file = "tree_sitter-0.21.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c7cbab1dd9765138505c4a55e2aa857575bac4f1f8a8b0457744a4fefa1288e6"}, - {file = "tree_sitter-0.21.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1e66aeb457d1529370fcb0997ae5584c6879e0e662f1b11b2f295ea57e22f54"}, - {file = "tree_sitter-0.21.3-cp312-cp312-win_amd64.whl", hash = "sha256:013c750252dc3bd0e069d82e9658de35ed50eecf31c6586d0de7f942546824c5"}, - {file = "tree_sitter-0.21.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4986a8cb4acebd168474ec2e5db440e59c7888819b3449a43ce8b17ed0331b07"}, - {file = "tree_sitter-0.21.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6e217fee2e7be7dbce4496caa3d1c466977d7e81277b677f954d3c90e3272ec2"}, - {file = "tree_sitter-0.21.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32a88afff4f2bc0f20632b0a2aa35fa9ae7d518f083409eca253518e0950929"}, - {file = "tree_sitter-0.21.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3652ac9e47cdddf213c5d5d6854194469097e62f7181c0a9aa8435449a163a9"}, - {file = "tree_sitter-0.21.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:60b4df3298ff467bc01e2c0f6c2fb43aca088038202304bf8e41edd9fa348f45"}, - {file = "tree_sitter-0.21.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:00e4d0c99dff595398ef5e88a1b1ddd53adb13233fb677c1fd8e497fb2361629"}, - {file = "tree_sitter-0.21.3-cp38-cp38-win_amd64.whl", hash = "sha256:50c91353a26946e4dd6779837ecaf8aa123aafa2d3209f261ab5280daf0962f5"}, - {file = "tree_sitter-0.21.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b17b8648b296ccc21a88d72ca054b809ee82d4b14483e419474e7216240ea278"}, - {file = "tree_sitter-0.21.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f2f057fd01d3a95cbce6794c6e9f6db3d376cb3bb14e5b0528d77f0ec21d6478"}, - {file = "tree_sitter-0.21.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:839759de30230ffd60687edbb119b31521d5ac016749358e5285816798bb804a"}, - {file = "tree_sitter-0.21.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df40aa29cb7e323898194246df7a03b9676955a0ac1f6bce06bc4903a70b5f7"}, - {file = "tree_sitter-0.21.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1d9be27dde007b569fa78ff9af5fe40d2532c998add9997a9729e348bb78fa59"}, - {file = "tree_sitter-0.21.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c4ac87735e6f98fe085244c7c020f0177d13d4c117db72ba041faa980d25d69d"}, - {file = "tree_sitter-0.21.3-cp39-cp39-win_amd64.whl", hash = "sha256:fbbd137f7d9a5309fb4cb82e2c3250ba101b0dd08a8abdce815661e6cf2cbc19"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6ca583dad4bd79d3053c310b9f7208cd597fd85f9947e4ab2294658bb5c11e35"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:94100e491a6a247aa4d14caf61230c171b6376c863039b6d9cd71255c2d815ec"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a6bc1055b061c5055ec58f39ee9b2e9efb8e6e0ae970838af74da0afb811f0a"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:056dc04fb6b24293f8c5fec43c14e7e16ba2075b3009c643abf8c85edc4c7c3c"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:a11ca1c0f736da42967586b568dff8a465ee148a986c15ebdc9382806e0ce871"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-win_amd64.whl", hash = "sha256:041fa22b34250ea6eb313d33104d5303f79504cb259d374d691e38bbdc49145b"}, + {file = "tree_sitter_javascript-0.23.1-cp39-abi3-win_arm64.whl", hash = "sha256:eb28130cd2fb30d702d614cbf61ef44d1c7f6869e7d864a9cc17111e370be8f7"}, + {file = "tree_sitter_javascript-0.23.1.tar.gz", hash = "sha256:b2059ce8b150162cda05a457ca3920450adbf915119c04b8c67b5241cd7fcfed"}, ] +[package.extras] +core = ["tree-sitter (>=0.22,<1.0)"] + [[package]] name = "tree-sitter-languages" version = "1.10.2" @@ -9599,6 +9621,69 @@ files = [ [package.dependencies] tree-sitter = "*" +[[package]] +name = "tree-sitter-python" +version = "0.23.6" +description = "Python grammar for tree-sitter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tree_sitter_python-0.23.6-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:28fbec8f74eeb2b30292d97715e60fac9ccf8a8091ce19b9d93e9b580ed280fb"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:680b710051b144fedf61c95197db0094f2245e82551bf7f0c501356333571f7a"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a9dcef55507b6567207e8ee0a6b053d0688019b47ff7f26edc1764b7f4dc0a4"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29dacdc0cd2f64e55e61d96c6906533ebb2791972bec988450c46cce60092f5d"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7e048733c36f564b379831689006801feb267d8194f9e793fbb395ef1723335d"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-win_amd64.whl", hash = "sha256:a24027248399fb41594b696f929f9956828ae7cc85596d9f775e6c239cd0c2be"}, + {file = "tree_sitter_python-0.23.6-cp39-abi3-win_arm64.whl", hash = "sha256:71334371bd73d5fe080aed39fbff49ed8efb9506edebe16795b0c7567ed6a272"}, + {file = "tree_sitter_python-0.23.6.tar.gz", hash = "sha256:354bfa0a2f9217431764a631516f85173e9711af2c13dbd796a8815acfe505d9"}, +] + +[package.extras] +core = ["tree-sitter (>=0.22,<1.0)"] + +[[package]] +name = "tree-sitter-ruby" +version = "0.23.1" +description = "Ruby grammar for tree-sitter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:39f391322d2210843f07081182dbf00f8f69cfbfa4687b9575cac6d324bae443"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:aa4ee7433bd42fac22e2dad4a3c0f332292ecf482e610316828c711a0bb7f794"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62b36813a56006b7569db7868f6b762caa3f4e419bd0f8cf9ccbb4abb1b6254c"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7bcd93972b4ca2803856d4fe0fbd04123ff29c4592bbb9f12a27528bd252341"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:66c65d6c2a629783ca4ab2bab539bd6f271ce6f77cacb62845831e11665b5bd3"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-win_amd64.whl", hash = "sha256:02e2c19ebefe29226c14aa63e11e291d990f5b5c20a99940ab6e7eda44e744e5"}, + {file = "tree_sitter_ruby-0.23.1-cp39-abi3-win_arm64.whl", hash = "sha256:ed042007e89f2cceeb1cbdd8b0caa68af1e2ce54c7eb2053ace760f90657ac9f"}, + {file = 
"tree_sitter_ruby-0.23.1.tar.gz", hash = "sha256:886ed200bfd1f3ca7628bf1c9fefd42421bbdba70c627363abda67f662caa21e"}, +] + +[package.extras] +core = ["tree-sitter (>=0.22,<1.0)"] + +[[package]] +name = "tree-sitter-typescript" +version = "0.23.2" +description = "TypeScript and TSX grammars for tree-sitter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3cd752d70d8e5371fdac6a9a4df9d8924b63b6998d268586f7d374c9fba2a478"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:c7cc1b0ff5d91bac863b0e38b1578d5505e718156c9db577c8baea2557f66de8"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b1eed5b0b3a8134e86126b00b743d667ec27c63fc9de1b7bb23168803879e31"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e96d36b85bcacdeb8ff5c2618d75593ef12ebaf1b4eace3477e2bdb2abb1752c"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8d4f0f9bcb61ad7b7509d49a1565ff2cc363863644a234e1e0fe10960e55aea0"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-win_amd64.whl", hash = "sha256:3f730b66396bc3e11811e4465c41ee45d9e9edd6de355a58bbbc49fa770da8f9"}, + {file = "tree_sitter_typescript-0.23.2-cp39-abi3-win_arm64.whl", hash = "sha256:05db58f70b95ef0ea126db5560f3775692f609589ed6f8dd0af84b7f19f1cbb7"}, + {file = "tree_sitter_typescript-0.23.2.tar.gz", hash = "sha256:7b167b5827c882261cb7a50dfa0fb567975f9b315e87ed87ad0a0a3aedb3834d"}, +] + +[package.extras] +core = ["tree-sitter (>=0.23,<1.0)"] + [[package]] name = "triton" version = "3.1.0" @@ -10655,4 +10740,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "431b15e98a730d03d7b3b8ea9ea15d812cf50802b35c18c741a69518c1a00464" +content-hash = "9ec13eb9f0359670bee8f4314216c66e568d899c3fd4992e978467732c59ee8c" diff --git a/pyproject.toml b/pyproject.toml index c4d209769f8b..5c73ba094dc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ pathspec = "^0.12.1" google-cloud-aiplatform = "*" anthropic = {extras = ["vertex"], version = "*"} grep-ast = "0.3.3" -tree-sitter = "0.21.3" +tree-sitter = "^0.24.0" bashlex = "^0.18" pyjwt = "^2.9.0" dirhash = "*" @@ -67,7 +67,7 @@ runloop-api-client = "0.23.0" libtmux = ">=0.37,<0.40" pygithub = "^2.5.0" joblib = "*" -openhands-aci = "^0.2.2" +openhands-aci = "^0.2.3" python-socketio = "^5.11.4" redis = "^5.2.0" sse-starlette = "^2.1.3" From 6c4801360117559bf8b23f061af433123cff8de9 Mon Sep 17 00:00:00 2001 From: mamoodi Date: Mon, 17 Feb 2025 16:48:57 -0500 Subject: [PATCH 33/44] Update OpenHands Cloud docs with correct permissions and instructions (#6774) --- docs/modules/usage/cloud/openhands-cloud.md | 24 ++++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/docs/modules/usage/cloud/openhands-cloud.md b/docs/modules/usage/cloud/openhands-cloud.md index 25a9ad2cb09f..f39773cb27a3 100644 --- a/docs/modules/usage/cloud/openhands-cloud.md +++ b/docs/modules/usage/cloud/openhands-cloud.md @@ -1,12 +1,26 @@ # Openhands Cloud -This document provides information about the hosted version of OpenHands. +OpenHands Cloud is the cloud hosted version of OpenHands by All Hands AI. 
+ +## Accessing OpenHands Cloud + +Currently, users are being admitted to access OpenHands Cloud in waves. To sign up, +[join the waitlist](https://www.all-hands.dev/join-waitlist). Once you are approved, you will get an email with +instructions on how to access it. ## Getting Started After visiting OpenHands Cloud, you will be asked to connect with your GitHub account: 1. After reading and accepting the terms of service, click `Connect to GitHub`. -2. Then click `Authorize OpenHands by All Hands AI`. +2. Review the permissions requested by OpenHands and then click `Authorize OpenHands by All Hands AI`. + - OpenHands will require some permissions from your GitHub account. To read more about these permissions, + you can click the `Learn more` link on the GitHub authorize page. + +## Adding Repositories + +You can grant OpenHands specific repository access: +1. Under the `Select a GitHub project` dropdown, select `Add more repositories...`. +2. Select the organization, then choose the specific repositories to grant OpenHands access to. - Openhands requests short-lived tokens (8-hour expiry) with these permissions: - Actions: Read and write - Administration: Read-only @@ -17,12 +31,6 @@ After visiting OpenHands Cloud, you will be asked to connect with your GitHub ac - Pull requests: Read and write - Webhooks: Read and write - Workflows: Read and write - -## Adding Repositories - -You can grant OpenHands specific repository access: -1. Under the `Select a GitHub project` dropdown, select `Add more repositories...`. -2. Select the organization, then choose the specific repositories to grant OpenHands access to. - Repository access for a user is granted based on: - Granted permission for the repository. - User's GitHub permissions (owner/collaborator). From 9ff15bf94f02778210ae3ed53a124f10f05314c0 Mon Sep 17 00:00:00 2001 From: Rohit Malhotra Date: Mon, 17 Feb 2025 17:27:13 -0500 Subject: [PATCH 34/44] Add selected branch to convo metadata (#6773) --- openhands/server/routes/manage_conversations.py | 1 + openhands/storage/data_models/conversation_metadata.py | 1 + 2 files changed, 2 insertions(+) diff --git a/openhands/server/routes/manage_conversations.py b/openhands/server/routes/manage_conversations.py index 29db83007656..0209fcbae4fe 100644 --- a/openhands/server/routes/manage_conversations.py +++ b/openhands/server/routes/manage_conversations.py @@ -100,6 +100,7 @@ async def _create_new_conversation( title=conversation_title, github_user_id=user_id, selected_repository=selected_repository, + selected_branch=selected_branch, ) ) diff --git a/openhands/storage/data_models/conversation_metadata.py b/openhands/storage/data_models/conversation_metadata.py index fb1f22a38fb7..15909e9b51c2 100644 --- a/openhands/storage/data_models/conversation_metadata.py +++ b/openhands/storage/data_models/conversation_metadata.py @@ -7,6 +7,7 @@ class ConversationMetadata: conversation_id: str github_user_id: str | None selected_repository: str | None + selected_branch: str | None = None title: str | None = None last_updated_at: datetime | None = None created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) From 0a6ff463db5eb294960d0a3b7e0e227fb7509691 Mon Sep 17 00:00:00 2001 From: tofarr Date: Tue, 18 Feb 2025 12:10:07 +0000 Subject: [PATCH 35/44] CSS Fixes (#6770) --- .../context-menu/account-settings-context-menu.tsx | 2 +- frontend/src/components/features/sidebar/sidebar.tsx | 8 ++++---- frontend/src/components/shared/buttons/action-button.tsx | 2 +- 3 files changed, 6 
insertions(+), 6 deletions(-) diff --git a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx index 8be19387f52f..288dd7728705 100644 --- a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx +++ b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx @@ -22,7 +22,7 @@ export function AccountSettingsContextMenu({ {t(I18nKey.ACCOUNT_SETTINGS$LOGOUT)} diff --git a/frontend/src/components/features/sidebar/sidebar.tsx b/frontend/src/components/features/sidebar/sidebar.tsx index d44ca00e7153..d4c8f3c0b99f 100644 --- a/frontend/src/components/features/sidebar/sidebar.tsx +++ b/frontend/src/components/features/sidebar/sidebar.tsx @@ -71,8 +71,8 @@ export function Sidebar() { return ( <>