From b1719bb3dbe09644d6012316835f49dde66267d6 Mon Sep 17 00:00:00 2001
From: Boxuan Li
Date: Mon, 23 Dec 2024 03:12:30 +0800
Subject: [PATCH] Add TheAgentCompany evaluation harness (#5731)

---
 .../benchmarks/the_agent_company/README.md    |  43 +++
 .../benchmarks/the_agent_company/browsing.py  | 273 +++++++++++++++
 .../benchmarks/the_agent_company/run_infer.py | 319 ++++++++++++++++++
 .../the_agent_company/scripts/run_infer.sh    | 115 +++++++
 4 files changed, 750 insertions(+)
 create mode 100644 evaluation/benchmarks/the_agent_company/README.md
 create mode 100644 evaluation/benchmarks/the_agent_company/browsing.py
 create mode 100644 evaluation/benchmarks/the_agent_company/run_infer.py
 create mode 100644 evaluation/benchmarks/the_agent_company/scripts/run_infer.sh

diff --git a/evaluation/benchmarks/the_agent_company/README.md b/evaluation/benchmarks/the_agent_company/README.md
new file mode 100644
index 000000000000..b3bb0077b639
--- /dev/null
+++ b/evaluation/benchmarks/the_agent_company/README.md
@@ -0,0 +1,43 @@
+# The Agent Company Evaluation with OpenHands
+
+This folder contains the evaluation harness that we built on top of the original [The Agent Company](https://github.com/TheAgentCompany/TheAgentCompany/tree/main/evaluation) ([paper](https://arxiv.org/abs/2412.14161)).
+
+The evaluation consists of two steps:
+
+1. Environment setup: [install the Python environment](../../README.md#development-environment), [configure the LLM config](../../README.md#configure-openhands-and-your-llm), and [launch the services](https://github.com/TheAgentCompany/TheAgentCompany/blob/main/docs/SETUP.md).
+2. [Run evaluation](#run-inference-on-the-agent-company-tasks): run all tasks and collect the evaluation results.
+
+## Setup Environment and LLM Configuration
+
+Please follow the instructions [here](../../README.md#setup) to set up your local development environment and LLM.
+
+## Run Inference on The Agent Company Tasks
+
+When the `run_infer.sh` script starts, it automatically pulls all task images. Every task image is used to build an OpenHands runtime image in which the agent operates.
+
+```bash
+./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
+  --agent-llm-config <agent-llm-config> \
+  --env-llm-config <env-llm-config> \
+  --outputs-path <outputs-path> \
+  --server-hostname <server-hostname> \
+  --version <version>
+
+# Example
+./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
+  --agent-llm-config claude-3-5-sonnet-20240620 \
+  --env-llm-config claude-3-5-sonnet-20240620 \
+  --outputs-path outputs \
+  --server-hostname localhost \
+  --version 1.0.0
+```
+
+- `agent-llm-config`: the config name for the agent LLM. It should match a config name in `config.toml` (see the example snippet below). This is the LLM used by the agent (e.g. CodeActAgent).
+- `env-llm-config`: the config name for the environment LLM. It should also match a config name in `config.toml`. This LLM is used by the chat bots (NPCs) and the LLM-based evaluators.
+- `outputs-path`: the path to save trajectories and evaluation results.
+- `server-hostname`: the hostname of the server that hosts all the web services. It can be `localhost` if you run the evaluation and the services on the same machine. If the services are hosted on a remote machine, you must use the hostname of the remote machine rather than its IP address.
+- `version`: the version of the task images to use. Currently, the only supported version is 1.0.0.
+
+The script is idempotent: if you run it again, it resumes from the last checkpoint. It usually takes a few days to finish the evaluation.
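+
+For reference, a minimal `config.toml` sketch with the two LLM sections referenced above could look like the following. This is only an illustration: the section names are whatever you pass to `--agent-llm-config` and `--env-llm-config`, and the model name, API key, and base URL are placeholders to replace with your own values.
+
+```toml
+[llm.agent]
+model = "anthropic/claude-3-5-sonnet-20240620"
+api_key = "your-api-key"
+
+[llm.env]
+model = "anthropic/claude-3-5-sonnet-20240620"
+api_key = "your-api-key"
+# base_url is optional, e.g. when routing requests through a LiteLLM proxy
+base_url = "https://your-litellm-proxy"
+```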
+ +Note: the script will automatically skip a task if it encounters an error. This usually happens when the OpenHands runtime dies due to some unexpected errors. This means even if the script finishes, it might not have evaluated all tasks. You can manually resume the evaluation by running the script again. diff --git a/evaluation/benchmarks/the_agent_company/browsing.py b/evaluation/benchmarks/the_agent_company/browsing.py new file mode 100644 index 000000000000..7384dddbdfce --- /dev/null +++ b/evaluation/benchmarks/the_agent_company/browsing.py @@ -0,0 +1,273 @@ +################################################################################################## +# Adapted from https://github.com/TheAgentCompany/TheAgentCompany/blob/main/evaluation/browsing.py +################################################################################################## + +import base64 +import os +import re +from dataclasses import dataclass +from enum import Enum, auto +from typing import Dict, List, Optional, Union + +from openhands.core.logger import openhands_logger as logger +from openhands.events.action import BrowseInteractiveAction +from openhands.events.observation import BrowserOutputObservation +from openhands.runtime.base import Runtime + + +class ActionType(Enum): + GOTO = auto() + FILL = auto() + CLICK = auto() + NOOP = auto() + + +@dataclass +class Selector: + """ + Represents either a direct anchor ID or a descriptive selector + """ + + value: str + is_anchor: bool = False + + def __str__(self) -> str: + return f'{self.value}' + + +@dataclass +class BrowserAction: + """Base class for all browser actions""" + + action_type: ActionType + + def to_instruction(self) -> str: + """Convert the action to a browser instruction string""" + raise NotImplementedError + + +@dataclass +class GotoAction(BrowserAction): + url: str + + def __init__(self, url: str): + super().__init__(ActionType.GOTO) + self.url = url + + def to_instruction(self) -> str: + return f'goto("{self.url}")' + + +@dataclass +class NoopAction(BrowserAction): + milliseconds: int + + def __init__(self, milliseconds: int): + super().__init__(ActionType.NOOP) + self.milliseconds = milliseconds + + def to_instruction(self) -> str: + return f'noop({self.milliseconds})' + + +@dataclass +class InputAction(BrowserAction): + selector: Selector + value: str + + def __init__(self, selector: Union[str, Selector], value: str): + super().__init__(ActionType.FILL) + self.selector = ( + selector if isinstance(selector, Selector) else Selector(selector) + ) + self.value = value + + def to_instruction(self) -> str: + return f'fill("{self.selector}", "{self.value}")' + + +@dataclass +class ClickAction(BrowserAction): + selector: Selector + + def __init__(self, selector: Union[str, Selector]): + super().__init__(ActionType.CLICK) + self.selector = ( + selector if isinstance(selector, Selector) else Selector(selector) + ) + + def to_instruction(self) -> str: + return f'click("{self.selector}")' + + +def parse_content_to_elements(content: str) -> Dict[str, str]: + """Parse the observation content into a dictionary mapping anchors to their descriptions""" + elements = {} + current_anchor = None + description_lines = [] + + for line in content.split('\n'): + line = line.strip() + if not line: + continue + + # Check for anchor line + anchor_match = re.match(r'\[(\d+)\](.*)', line) + if anchor_match: + # Save previous element if it exists + if current_anchor and description_lines: + elements[current_anchor] = ' '.join(description_lines) + + # Start new 
element + current_anchor = anchor_match.group(1) + description_lines = [anchor_match.group(2).strip()] + else: + # Add to current description if we have an anchor + if current_anchor: + description_lines.append(line) + + # Save last element + if current_anchor and description_lines: + elements[current_anchor] = ' '.join(description_lines) + + return elements + + +def find_matching_anchor(content: str, selector: str) -> Optional[str]: + """Find the anchor ID that matches the given selector description""" + elements = parse_content_to_elements(content) + + # Clean up selector and create a pattern + selector = selector.lower().strip() + + for anchor, description in elements.items(): + description = description.lower().strip() + if selector in description: + return anchor + + return None + + +def resolve_action(action: BrowserAction, content: str) -> BrowserAction: + """ + Resolve any descriptive selectors in the action to anchor IDs based on the content. + Returns a new action with resolved selectors. + """ + if isinstance(action, (InputAction, ClickAction)): + if not action.selector.is_anchor: + anchor = find_matching_anchor(content, action.selector.value) + if anchor: + new_selector = Selector(anchor, is_anchor=True) + if isinstance(action, InputAction): + return InputAction(new_selector, action.value) + else: + return ClickAction(new_selector) + else: + logger.error(f'NO MATCH FOUND FOR SELECTOR, {action.selector}') + return None + return action + + +def pre_login( + runtime: Runtime, + services: List[str], + save_screenshots=True, + screenshots_dir='screenshots', +): + """ + Logs in to all the websites that are needed for the evaluation. + Once logged in, the sessions would be cached in the browser, so OpenHands + agent doesn't need to log in to these websites again. 
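+
+    Selectors in the login actions below are descriptive (e.g. "button 'Sign in',
+    clickable") rather than numeric anchor IDs; before each step they are matched
+    against the latest browser observation and rewritten to the matching anchor,
+    e.g. click("42"), by resolve_action.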
+ """ + owncloud_login_actions = [ + GotoAction('http://the-agent-company.com:8092'), + NoopAction(1000), + InputAction("textbox '', clickable, focused, required", 'theagentcompany'), + NoopAction(1000), + InputAction("textbox '', clickable, required", 'theagentcompany'), + NoopAction(1000), + ClickAction("button '', clickable"), + NoopAction(1000), + ] + + rocketchat_login_actions = [ + GotoAction('http://the-agent-company.com:3000'), + NoopAction(1000), + InputAction("textbox '', clickable, focused", 'theagentcompany'), + NoopAction(1000), + InputAction("textbox '', clickable", 'theagentcompany'), + NoopAction(1000), + ClickAction("button 'Login', clickable"), + ] + + gitlab_login_actions = [ + GotoAction('http://the-agent-company.com:8929/users/sign_in'), + NoopAction(1000), + InputAction("textbox 'Username or primary email'", 'root'), + NoopAction(1000), + InputAction("textbox 'Password'", 'theagentcompany'), + NoopAction(1000), + ClickAction("button 'Sign in', clickable"), + ] + + # devnote: plane reset is not stable, and sometimes it fails to launch + # in which case the login action will fail, and then we would skip the task + plane_login_actions = [ + GotoAction('http://the-agent-company.com:8091'), + NoopAction(1000), + InputAction( + "textbox 'Email', clickable, focused", + 'agent@company.com', + ), + NoopAction(1000), + ClickAction("button 'Continue'"), + NoopAction(1000), + InputAction("textbox 'Enter password', clickable", 'theagentcompany'), + NoopAction(1000), + ClickAction("button 'Go to workspace'"), + ] + + all_login_actions = [ + ('owncloud', owncloud_login_actions), + ('rocketchat', rocketchat_login_actions), + ('gitlab', gitlab_login_actions), + ('plane', plane_login_actions), + ] + + for website_name, login_actions in all_login_actions: + if website_name not in services: + logger.info( + f"Skipping login for {website_name} because it's not in the list of services to reset" + ) + continue + + if save_screenshots: + directory = os.path.join(screenshots_dir, website_name) + if not os.path.exists(directory): + os.makedirs(directory) + image_id = 0 + obs: BrowserOutputObservation = None + for action in login_actions: + # Resolve any descriptive selectors to anchor IDs + if obs: + action = resolve_action(action, obs.get_agent_obs_text()) + + if not action: + logger.error(f'FAILED TO RESOLVE ACTION, {action}') + raise Exception( + 'FAILED TO RESOLVE ACTION, maybe the service is not available' + ) + + # Convert the action to an instruction string + instruction = action.to_instruction() + + browser_action = BrowseInteractiveAction(browser_actions=instruction) + browser_action.timeout = 10000 + logger.info(browser_action, extra={'msg_type': 'ACTION'}) + obs: BrowserOutputObservation = runtime.run_action(browser_action) + logger.debug(obs, extra={'msg_type': 'OBSERVATION'}) + if save_screenshots: + image_data = base64.b64decode(obs.screenshot) + with open(os.path.join(directory, f'{image_id}.png'), 'wb') as file: + file.write(image_data) + image_id += 1 diff --git a/evaluation/benchmarks/the_agent_company/run_infer.py b/evaluation/benchmarks/the_agent_company/run_infer.py new file mode 100644 index 000000000000..03561913087c --- /dev/null +++ b/evaluation/benchmarks/the_agent_company/run_infer.py @@ -0,0 +1,319 @@ +################################################################################################## +# Adapted from https://github.com/TheAgentCompany/TheAgentCompany/blob/main/evaluation/run_eval.py 
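+# Entry point for a single task: build the sandbox from the task image,
+# initialize the task environment, run the agent, then run the LLM-based evaluator.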
+################################################################################################## + +import asyncio +import base64 +import json +import os +import shutil +import tempfile +from typing import List + +import yaml +from browsing import pre_login + +from openhands.controller.state.state import State +from openhands.core.config import ( + AppConfig, + LLMConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) +from openhands.core.logger import openhands_logger as logger +from openhands.core.main import create_runtime, run_controller +from openhands.events.action import CmdRunAction, MessageAction +from openhands.events.observation import BrowserOutputObservation, CmdOutputObservation +from openhands.runtime.base import Runtime +from openhands.utils.async_utils import call_async_from_sync + + +def get_config( + base_container_image: str, + task_short_name: str, + mount_path_on_host: str, + llm_config: LLMConfig, +) -> AppConfig: + config = AppConfig( + run_as_openhands=False, + max_budget_per_task=4, + max_iterations=100, + trajectories_path=os.path.join( + mount_path_on_host, f'traj_{task_short_name}.json' + ), + sandbox=SandboxConfig( + base_container_image=base_container_image, + enable_auto_lint=True, + # using host network to access the host machine from the container + use_host_network=True, + # large enough timeout, since some testcases take very long to run + timeout=300, + api_key=os.environ.get('ALLHANDS_API_KEY', None), + ), + # we mount trajectories path so that trajectories, generated by OpenHands + # controller, can be accessible to the evaluator file in the runtime container + workspace_mount_path=mount_path_on_host, + workspace_mount_path_in_sandbox='/outputs', + ) + config.set_llm_config(llm_config) + return config + + +def load_dependencies(runtime: Runtime) -> List[str]: + """ + Every task has a dependencies.yml file, which lists all the services that the + task depends on. This function loads the file and returns all dependent service names. 
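+
+    The file is expected to contain a plain YAML list of service names, e.g.
+    ["gitlab", "rocketchat", "plane", "owncloud"]; an empty file yields an
+    empty list.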
+ """ + command = 'cat /utils/dependencies.yml' + action = CmdRunAction(command=command) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs: CmdOutputObservation = runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + dependencies = yaml.safe_load(obs.content) + if dependencies is None: + dependencies = [] + return dependencies + + +def init_task_env(runtime: Runtime, hostname: str, env_llm_config: LLMConfig): + command = ( + f'SERVER_HOSTNAME={hostname} ' + f'LITELLM_API_KEY={env_llm_config.api_key} ' + f'LITELLM_BASE_URL={env_llm_config.base_url} ' + f'LITELLM_MODEL={env_llm_config.model} ' + 'bash /utils/init.sh' + ) + action = CmdRunAction(command=command) + action.timeout = 900 + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + +def codeact_user_response(state: State) -> str: + msg = ( + 'Please continue working on the task on whatever approach you think is suitable.\n' + 'If you think you have solved the task, please finish the interaction.\n' + 'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n' + ) + + if state.history: + # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up + user_msgs = [ + event + for event in state.history + if isinstance(event, MessageAction) and event.source == 'user' + ] + if len(user_msgs) >= 2: + # let the agent know that it can give up when it has tried 3 times + return ( + msg + + 'If you want to give up, run: exit .\n' + ) + return msg + + +def run_solver( + runtime: Runtime, + task_name: str, + config: AppConfig, + dependencies: List[str], + save_final_state: bool, + state_dir: str, + save_screenshots: bool, + screenshots_dir: str, +) -> State: + instruction = 'Complete the task in /instruction/task.md' + + if 'gitlab' in dependencies: + instruction += "\n\nGitlab username is 'root' and password is 'theagentcompany'" + + state: State | None = asyncio.run( + run_controller( + config=config, + sid=task_name, + initial_user_action=MessageAction(content=instruction), + runtime=runtime, + fake_user_response_fn=codeact_user_response, + ) + ) + logger.info(state) + + if save_screenshots: + screenshots_dir = os.path.join(screenshots_dir, task_name) + os.makedirs(screenshots_dir, exist_ok=True) + for image_id, obs in enumerate(state.history): + if isinstance(obs, BrowserOutputObservation): + image_data = base64.b64decode(obs.screenshot) + with open( + os.path.join(screenshots_dir, f'{image_id}.png'), 'wb' + ) as file: + file.write(image_data) + + if save_final_state: + os.makedirs(state_dir, exist_ok=True) + with open(os.path.join(state_dir, f'state_{task_name}.json'), 'w') as file: + json.dump(str(state), file) + + return state + + +def run_evaluator( + runtime: Runtime, env_llm_config: LLMConfig, trajectory_path: str, result_path: str +): + command = ( + f'LITELLM_API_KEY={env_llm_config.api_key} ' + f'LITELLM_BASE_URL={env_llm_config.base_url} ' + f'LITELLM_MODEL={env_llm_config.model} ' + f"DECRYPTION_KEY='theagentcompany is all you need' " # Hardcoded Key + f'python_default /utils/eval.py --trajectory_path {trajectory_path} --result_path {result_path}' + ) + action = CmdRunAction(command=command) + action.timeout = 600 + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + +if __name__ == '__main__': + parser = 
get_parser() + parser.add_argument( + '--task-image-name', + type=str, + default='ghcr.io/theagentcompany/example-image:1.0.0', + help='Task image name', + ) + parser.add_argument( + '--outputs-path', + type=str, + default='./outputs', + help='Folder path to save trajectories and evaluation results', + ) + parser.add_argument( + '--server-hostname', + type=str, + default='localhost', + help='Server hostname, e.g. localhost to access the host machine from the container, ' + 'assuming the task docker container is run with `--network host` flag', + ) + parser.add_argument( + '--agent-llm-config', + type=str, + default=None, + help='LLM config for agent', + ) + parser.add_argument( + '--env-llm-config', + type=str, + default=None, + help='LLM config for evaluation environment (NPC & llm-based evaluator)', + ) + args, _ = parser.parse_known_args() + + agent_llm_config: LLMConfig | None = None + if args.agent_llm_config: + agent_llm_config = get_llm_config_arg(args.agent_llm_config) + + if agent_llm_config is None: + raise ValueError( + f'Could not find LLM config for agent: --agent-llm-config {args.agent_llm_config}' + ) + + if agent_llm_config.api_key is None: + raise ValueError('LLM API key is not set for agent') + + env_llm_config: LLMConfig | None = None + if args.env_llm_config: + env_llm_config = get_llm_config_arg(args.env_llm_config) + + if env_llm_config is None: + raise ValueError( + f'Could not find LLM config for evaluation environment: --env-llm-config {args.env_llm_config}' + ) + + if env_llm_config.api_key is None: + raise ValueError('LLM API key is not set for evaluation environment') + + task_short_name = args.task_image_name.split('/')[-1].split(':')[0] + logger.info( + f'Task image name is {args.task_image_name}, short name is {task_short_name}' + ) + + # mount a temporary directory to pass trajectory from host to container, and to + # pass the evaluation result from container to host + # 1) trajectory is dumped by OpenHands library (on host machine), but it's needed by + # evaluator (in container), so we mount a temporary directory to pass it in + # 2) evaluation result is written by evaluator (in container), but we need to persist + # it on host machine, so we mount a temporary directory to pass it out + if os.getenv('TMPDIR') and os.path.exists(os.getenv('TMPDIR')): + temp_dir = os.path.abspath(os.getenv('TMPDIR')) + else: + temp_dir = tempfile.mkdtemp() + config: AppConfig = get_config( + args.task_image_name, task_short_name, temp_dir, agent_llm_config + ) + runtime: Runtime = create_runtime(config) + call_async_from_sync(runtime.connect) + + init_task_env(runtime, args.server_hostname, env_llm_config) + + dependencies = load_dependencies(runtime) + logger.info(f'Service dependencies: {dependencies}') + + try: + pre_login( + runtime, + dependencies, + save_screenshots=True, + screenshots_dir=os.path.join( + os.path.abspath(args.outputs_path), 'screenshots' + ), + ) + except Exception as e: + logger.error(f'Failed to pre-login: {e}') + + # before giving up, let's try to init and login again + init_task_env(runtime, args.server_hostname, env_llm_config) + pre_login( + runtime, + dependencies, + save_screenshots=True, + screenshots_dir=os.path.join( + os.path.abspath(args.outputs_path), 'screenshots' + ), + ) + + state = run_solver( + runtime, + task_short_name, + config, + dependencies, + save_final_state=True, + state_dir=os.path.abspath(args.outputs_path), + save_screenshots=True, + screenshots_dir=os.path.join(os.path.abspath(args.outputs_path), 'screenshots'), + ) + + 
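+    # note: temp_dir on the host is mounted at /outputs inside the runtime
+    # container (workspace_mount_path_in_sandbox in get_config), so the trajectory
+    # dumped on the host and the eval result written in the container are visible
+    # on both sides of the mount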
# this path is the absolute path in the runtime container + trajectory_path = f'/outputs/traj_{task_short_name}.json' + result_path = f'/outputs/eval_{task_short_name}.json' + + run_evaluator(runtime, env_llm_config, trajectory_path, result_path) + + # finally, move trajectory file and evaluation result from mount path on host (temp dir) to outputs path + shutil.move( + os.path.join(temp_dir, f'traj_{task_short_name}.json'), + os.path.join( + os.path.abspath(args.outputs_path), f'traj_{task_short_name}.json' + ), + ) + shutil.move( + os.path.join(temp_dir, f'eval_{task_short_name}.json'), + os.path.join( + os.path.abspath(args.outputs_path), f'eval_{task_short_name}.json' + ), + ) diff --git a/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh new file mode 100644 index 000000000000..be5e49bd3ada --- /dev/null +++ b/evaluation/benchmarks/the_agent_company/scripts/run_infer.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +################################################################################################## +# Adapted from https://github.com/TheAgentCompany/TheAgentCompany/blob/main/evaluation/run_eval.sh +################################################################################################## + +# Exit on any error would be useful for debugging +if [ -n "$DEBUG" ]; then + set -e +fi + +# AGENT_LLM_CONFIG is the config name for the agent LLM +# In config.toml, you should have a section with the name +# [llm.], e.g. [llm.agent] +AGENT_LLM_CONFIG="agent" + +# ENV_LLM_CONFIG is the config name for the environment LLM, +# used by the NPCs and LLM-based evaluators. +# In config.toml, you should have a section with the name +# [llm.], e.g. [llm.env] +ENV_LLM_CONFIG="env" + +# OUTPUTS_PATH is the path to save trajectories and evaluation results +OUTPUTS_PATH="outputs" + +# SERVER_HOSTNAME is the hostname of the server that hosts all the web services, +# including RocketChat, ownCloud, GitLab, and Plane. +SERVER_HOSTNAME="localhost" + +# VERSION is the version of the task images to use +# If a task doesn't have a published image with this version, it will be skipped +# 12/15/2024: this is for forward compatibility, in the case where we add new tasks +# after the 1.0.0 release +VERSION="1.0.0" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case "$1" in + --agent-llm-config) + AGENT_LLM_CONFIG="$2" + shift 2 + ;; + --env-llm-config) + ENV_LLM_CONFIG="$2" + shift 2 + ;; + --outputs-path) + OUTPUTS_PATH="$2" + shift 2 + ;; + --server-hostname) + SERVER_HOSTNAME="$2" + shift 2 + ;; + --version) + VERSION="$2" + shift 2 + ;; + *) + echo "Unknown argument: $1" + exit 1 + ;; + esac +done + +# Convert outputs_path to absolute path +if [[ ! "$OUTPUTS_PATH" = /* ]]; then + # If path is not already absolute (doesn't start with /), make it absolute + OUTPUTS_PATH="$(cd "$(dirname "$OUTPUTS_PATH")" 2>/dev/null && pwd)/$(basename "$OUTPUTS_PATH")" +fi + +echo "Using agent LLM config: $AGENT_LLM_CONFIG" +echo "Using environment LLM config: $ENV_LLM_CONFIG" +echo "Outputs path: $OUTPUTS_PATH" +echo "Server hostname: $SERVER_HOSTNAME" +echo "Version: $VERSION" + +echo "Downloading tasks.md..." 
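+# tasks.md, published with each release, is expected to list one task image per
+# line, e.g. ghcr.io/theagentcompany/example-image:1.0.0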
+rm -f tasks.md +wget https://github.com/TheAgentCompany/TheAgentCompany/releases/download/${VERSION}/tasks.md + +while IFS= read -r task_image; do + docker pull $task_image + + # Remove prefix using ## to remove longest matching pattern from start + task_name=${task_image##ghcr.io/theagentcompany/} + + # Remove suffix using % to remove shortest matching pattern from end + task_name=${task_name%-image:*} + echo "Use task image $task_image, task name $task_name..." + + # Check if evaluation file exists + if [ -f "$OUTPUTS_PATH/eval_${task_name}-image.json" ]; then + echo "Skipping $task_name - evaluation file already exists" + continue + fi + + export PYTHONPATH=evaluation/benchmarks/the_agent_company:\$PYTHONPATH && \ + poetry run python run_infer.py \ + --agent-llm-config "$AGENT_LLM_CONFIG" \ + --env-llm-config "$ENV_LLM_CONFIG" \ + --outputs-path "$OUTPUTS_PATH" \ + --server-hostname "$SERVER_HOSTNAME" \ + --task-image-name "$task_image" + + # Prune unused images and volumes + docker image rm "$task_image" + docker images "ghcr.io/all-hands-ai/runtime" -q | xargs -r docker rmi -f + docker volume prune -f + docker system prune -f +done < tasks.md + +rm tasks.md + +echo "All evaluation completed successfully!"