diff --git a/.github/workflows/test-action.yml b/.github/workflows/test-action.yml
new file mode 100644
index 000000000000..16e6a672e38f
--- /dev/null
+++ b/.github/workflows/test-action.yml
@@ -0,0 +1,19 @@
+name: Test main.py
+on:
+  push:
+  pull_request:
+    branches: [ main ]
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    services:
+      docker:
+        image: docker:19.03.12
+        options: --privileged
+    steps:
+      - uses: actions/checkout@v4
+      - name: Run main.py with fake model
+        run: |
+          export SANDBOX_CONTAINER_IMAGE="ubuntu:24.04"
+          python -m pip install -r requirements.txt
+          PYTHONPATH=`pwd` python ./opendevin/main.py -d ./ -t "write a hello world script" --model-name=fake
diff --git a/agenthub/codeact_agent/__init__.py b/agenthub/codeact_agent/__init__.py
index 8b925723b623..a8ac1753382b 100644
--- a/agenthub/codeact_agent/__init__.py
+++ b/agenthub/codeact_agent/__init__.py
@@ -9,10 +9,6 @@
 from opendevin.lib.command_manager import CommandManager
 from opendevin.sandbox.sandbox import DockerInteractive
 
-assert (
-    "OPENAI_API_KEY" in os.environ
-), "Please set the OPENAI_API_KEY environment variable."
-
 SYSTEM_MESSAGE = """You are a helpful assistant. You will be provided access (as root) to a bash shell to complete user-provided tasks.
@@ -64,7 +60,12 @@ def __init__(
         - instruction (str): The instruction for the agent to execute.
         - max_steps (int): The maximum number of steps to run the agent.
         """
+        assert (
+            "OPENAI_API_KEY" in os.environ
+        ), "Please set the OPENAI_API_KEY environment variable."
+
         super().__init__(instruction, workspace_dir, max_steps)
+
         self._history = [Message(Role.SYSTEM, SYSTEM_MESSAGE)]
         self._history.append(Message(Role.USER, instruction))
         self.env = DockerInteractive(workspace_dir=workspace_dir)
diff --git a/agenthub/langchains_agent/utils/agent.py b/agenthub/langchains_agent/utils/agent.py
index 2c310a61fa49..b454d7008bb9 100644
--- a/agenthub/langchains_agent/utils/agent.py
+++ b/agenthub/langchains_agent/utils/agent.py
@@ -11,7 +11,7 @@ def __init__(self, task, model_name):
         self.task = task
         self.model_name = model_name
         self.monologue = Monologue(model_name)
-        self.memory = LongTermMemory()
+        self.memory = LongTermMemory(local_embeddings=(model_name == 'fake'))
 
     def add_event(self, event):
         if 'output' in event.args:
@@ -22,6 +22,8 @@ def add_event(self, event):
         self.monologue.condense()
 
     def get_next_action(self, cmd_mgr):
+        if self.model_name == 'fake':
+            return Event('finish', {})
         action_dict = llm.request_action(
             self.task,
             self.monologue.get_thoughts(),
diff --git a/agenthub/langchains_agent/utils/llm.py b/agenthub/langchains_agent/utils/llm.py
index 98475e3cedaf..29318811a73a 100644
--- a/agenthub/langchains_agent/utils/llm.py
+++ b/agenthub/langchains_agent/utils/llm.py
@@ -96,7 +96,8 @@ class NewMonologue(BaseModel):
     new_monologue: List[Action]
 
 def get_chain(template, model_name):
-    assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable to use langchains_agent."
+    if model_name != "fake":
+        assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable to use langchains_agent."
     llm = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"), model_name=model_name)
     prompt = PromptTemplate.from_template(template)
     llm_chain = LLMChain(prompt=prompt, llm=llm)
diff --git a/agenthub/langchains_agent/utils/memory.py b/agenthub/langchains_agent/utils/memory.py
index c73bc613e95d..88e0e23dfa6e 100644
--- a/agenthub/langchains_agent/utils/memory.py
+++ b/agenthub/langchains_agent/utils/memory.py
@@ -8,11 +8,15 @@
 from llama_index.vector_stores.chroma import ChromaVectorStore
 
 class LongTermMemory:
-    def __init__(self):
+    def __init__(self, local_embeddings=False):
         db = chromadb.Client()
         self.collection = db.get_or_create_collection(name="memories")
         vector_store = ChromaVectorStore(chroma_collection=self.collection)
-        self.index = VectorStoreIndex.from_vector_store(vector_store)
+        storage_context = StorageContext.from_defaults(vector_store=vector_store)
+        if local_embeddings:
+            self.index = VectorStoreIndex.from_vector_store(vector_store, embed_model='local')
+        else:
+            self.index = VectorStoreIndex.from_vector_store(vector_store)
         self.thought_idx = 0
 
     def add_event(self, event):
diff --git a/requirements.txt b/requirements.txt
index a012a665b901..cfec06116943 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,5 @@ langchain-openai
 langchain-community
 llama-index
 llama-index-vector-stores-chroma
-chromadb
\ No newline at end of file
+llama-index-embeddings-huggingface
+chromadb
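A note on the memory.py change: llama-index resolves the string embed_model='local' to a local HuggingFace embedding model, which is what the new llama-index-embeddings-huggingface entry in requirements.txt supports; together with the fake-model short circuit in agent.py, the CI run never needs an OPENAI_API_KEY. A minimal sketch of that fallback in isolation, assuming llama-index 0.10.x (the build_memory_index helper is illustrative, not part of this patch):

    # Sketch only: assumes llama-index 0.10.x with the
    # llama-index-embeddings-huggingface package installed.
    import chromadb
    from llama_index.core import VectorStoreIndex
    from llama_index.vector_stores.chroma import ChromaVectorStore

    def build_memory_index(local_embeddings: bool = False) -> VectorStoreIndex:
        # Back the index with an in-memory Chroma collection, as memory.py does.
        collection = chromadb.Client().get_or_create_collection(name="memories")
        vector_store = ChromaVectorStore(chroma_collection=collection)
        if local_embeddings:
            # 'local' resolves to a default HuggingFace embedding model,
            # so no OpenAI key or network call is needed to embed memories.
            return VectorStoreIndex.from_vector_store(vector_store, embed_model="local")
        # Default path uses the globally configured (OpenAI) embedding model.
        return VectorStoreIndex.from_vector_store(vector_store)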