From 7954f6b51c55c19b172e57049f92c1dfcf7f2f67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 7 Jan 2024 12:43:23 -0300 Subject: [PATCH] Reliability improvements (#77) * fixing indentation for AgentTools * updating gitignore to exclude quick test script * starting prompt translation * supporting individual task output * adding agent to task output * cutting new version * Updating README example --- .gitignore | 3 +- README.md | 44 +++++++------- crewai/agent.py | 4 +- crewai/crew.py | 18 +++--- crewai/prompts.py | 111 +++++++++++++----------------------- crewai/prompts/en.json | 8 +++ crewai/task.py | 9 ++- crewai/tasks/task_output.py | 17 ++++++ crewai/tools/agent_tools.py | 18 +++--- pyproject.toml | 2 +- tests/crew_test.py | 14 ++--- 11 files changed, 125 insertions(+), 123 deletions(-) create mode 100644 crewai/prompts/en.json create mode 100644 crewai/tasks/task_output.py diff --git a/.gitignore b/.gitignore index f89d817159..6ed598fe3f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,5 @@ __pycache__ dist/ .env assets/* -.idea \ No newline at end of file +.idea +test.py \ No newline at end of file diff --git a/README.md b/README.md index 33e46271e7..28f40419f4 100644 --- a/README.md +++ b/README.md @@ -44,18 +44,14 @@ pip install duckduckgo-search import os from crewai import Agent, Task, Crew, Process +os.environ["OPENAI_API_KEY"] = "YOUR KEY" + # You can choose to use a local model through Ollama for example. -# In this case we will use OpenHermes 2.5 as an example. # # from langchain.llms import Ollama # ollama_llm = Ollama(model="openhermes") -# If you are using an ollama like above you don't need to set OPENAI_API_KEY. -os.environ["OPENAI_API_KEY"] = "Your Key" - -# Define your tools, custom or not. 
# Install duckduckgo-search for this example: -# # !pip install -U duckduckgo-search from langchain.tools import DuckDuckGoSearchRun @@ -65,41 +61,46 @@ search_tool = DuckDuckGoSearchRun() researcher = Agent( role='Senior Research Analyst', goal='Uncover cutting-edge developments in AI and data science in', - backstory="""You are a Senior Research Analyst at a leading tech think tank. - Your expertise lies in identifying emerging trends and technologies in AI and - data science. You have a knack for dissecting complex data and presenting + backstory="""You work at a leading tech think tank. + Your expertise lies in identifying emerging trends. + You have a knack for dissecting complex data and presenting actionable insights.""", verbose=True, allow_delegation=False, tools=[search_tool] - # (optional) llm=ollama_llm, If you wanna use a local modal through Ollama, default is GPT4 with temperature=0.7 - + # You can pass an optional llm attribute specifying what model you wanna use. + # It can be a local model through Ollama / LM Studio or a remote + # model like OpenAI, Mistral, Anthropic or others (https://python.langchain.com/docs/integrations/llms/) + # + # Examples: + # llm=ollama_llm # was defined above in the file + # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7) ) writer = Agent( role='Tech Content Strategist', goal='Craft compelling content on tech advancements', - backstory="""You are a renowned Tech Content Strategist, known for your insightful - and engaging articles on technology and innovation. With a deep understanding of - the tech industry, you transform complex concepts into compelling narratives.""", + backstory="""You are a renowned Content Strategist, known for + your insightful and engaging articles. 
+ You transform complex concepts into compelling narratives.""", verbose=True, - # (optional) llm=ollama_llm, If you wanna use a local modal through Ollama, default is GPT4 with temperature=0.7 - allow_delegation=True + allow_delegation=True, + # (optional) llm=ollama_llm ) # Create tasks for your agents task1 = Task( description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024. Identify key trends, breakthrough technologies, and potential industry impacts. - Compile your findings in a detailed report. Your final answer MUST be a full analysis report""", + Your final answer MUST be a full analysis report""", agent=researcher ) task2 = Task( - description="""Using the insights from the researcher's report, develop an engaging blog + description="""Using the insights provided, develop an engaging blog post that highlights the most significant AI advancements. Your post should be informative yet accessible, catering to a tech-savvy audience. - Aim for a narrative that captures the essence of these breakthroughs and their - implications for the future. Your final answer MUST be the full blog post of at least 3 paragraphs.""", + Make it sound cool, avoid complex words so it doesn't sound like AI. + Your final answer MUST be the full blog post of at least 4 paragraphs.""", agent=writer ) @@ -107,8 +108,7 @@ task2 = Task( crew = Crew( agents=[researcher, writer], tasks=[task1, task2], - verbose=2, # Crew verbose more will let you know what tasks are being worked on, you can set it to 1 or 2 to different logging levels - process=Process.sequential # Sequential process will have tasks executed one after the other and the outcome of the previous one is passed as extra content into this next. + verbose=2, # You can set it to 1 or 2 to different logging levels ) # Get your crew to work! 
diff --git a/crewai/agent.py b/crewai/agent.py index f8e9a11936..2a4f0d42f6 100644 --- a/crewai/agent.py +++ b/crewai/agent.py @@ -155,9 +155,9 @@ def __create_agent_executor(self) -> CrewAgentExecutor: ) executor_args["memory"] = summary_memory agent_args["chat_history"] = lambda x: x["chat_history"] - prompt = Prompts.TASK_EXECUTION_WITH_MEMORY_PROMPT + prompt = Prompts().task_execution_with_memory() else: - prompt = Prompts.TASK_EXECUTION_PROMPT + prompt = Prompts().task_execution() execution_prompt = prompt.partial( goal=self.goal, diff --git a/crewai/crew.py b/crewai/crew.py index 554ca3fce3..00db609183 100644 --- a/crewai/crew.py +++ b/crewai/crew.py @@ -111,21 +111,21 @@ def __sequential_loop(self) -> str: Returns: Output of the crew. """ - task_outcome = None + task_output = None for task in self.tasks: # Add delegation tools to the task if the agent allows it if task.agent.allow_delegation: - tools = AgentTools(agents=self.agents).tools() - task.tools += tools + agent_tools = AgentTools(agents=self.agents).tools() + task.tools += agent_tools self.__log("debug", f"Working Agent: {task.agent.role}") - self.__log("info", f"Starting Task: {task.description} ...") + self.__log("info", f"Starting Task: {task.description}") - task_outcome = task.execute(task_outcome) - - self.__log("debug", f"Task output: {task_outcome}") - - return task_outcome + task_output = task.execute(task_output) + self.__log( + "debug", f"\n\n[{task.agent.role}] Task output: {task_output}\n\n" + ) + return task_output def __log(self, level, message): """Log a message""" diff --git a/crewai/prompts.py b/crewai/prompts.py index 62d852e6ba..037efcd70d 100644 --- a/crewai/prompts.py +++ b/crewai/prompts.py @@ -1,84 +1,53 @@ """Prompts for generic agent.""" - -from textwrap import dedent -from typing import ClassVar +import json +import os +from typing import ClassVar, Dict, Optional from langchain.prompts import PromptTemplate -from pydantic import BaseModel +from pydantic import BaseModel, 
Field, PrivateAttr, model_validator class Prompts(BaseModel): """Prompts for generic agent.""" - TASK_SLICE: ClassVar[str] = dedent( - """\ - Begin! This is VERY important to you, your job depends on it! - - Current Task: {input}""" + _prompts: Optional[Dict[str, str]] = PrivateAttr() + language: Optional[str] = Field( + default="en", + description="Language of crewai prompts.", ) - SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}" - - MEMORY_SLICE: ClassVar[str] = dedent( - """\ - This is the summary of your work so far: - {chat_history}""" - ) - - ROLE_PLAYING_SLICE: ClassVar[str] = dedent( - """\ - You are {role}. - {backstory} - - Your personal goal is: {goal}""" - ) - - TOOLS_SLICE: ClassVar[str] = dedent( - """\ - - - TOOLS: - ------ - You have access to the following tools: - - {tools} - - To use a tool, please use the exact following format: + @model_validator(mode="after") + def load_prompts(self) -> "Prompts": + """Load prompts from file.""" + dir_path = os.path.dirname(os.path.realpath(__file__)) + prompts_path = os.path.join(dir_path, f"prompts/{self.language}.json") - ``` - Thought: Do I need to use a tool? Yes - Action: the action to take, should be one of [{tool_names}], just the name. - Action Input: the input to the action - Observation: the result of the action - ``` + with open(prompts_path, "r") as f: + self._prompts = json.load(f)["slices"] + return self - When you have a response for your task, or if you do not need to use a tool, you MUST use the format: - - ``` - Thought: Do I need to use a tool? No - Final Answer: [your response here] - ```""" - ) - - VOTING_SLICE: ClassVar[str] = dedent( - """\ - You are working on a crew with your co-workers and need to decide who will execute the task. 
- - These are your format instructions: - {format_instructions} - - These are your co-workers and their roles: - {coworkers}""" - ) - - TASK_EXECUTION_WITH_MEMORY_PROMPT: ClassVar[str] = PromptTemplate.from_template( - ROLE_PLAYING_SLICE + TOOLS_SLICE + MEMORY_SLICE + TASK_SLICE + SCRATCHPAD_SLICE - ) - - TASK_EXECUTION_PROMPT: ClassVar[str] = PromptTemplate.from_template( - ROLE_PLAYING_SLICE + TOOLS_SLICE + TASK_SLICE + SCRATCHPAD_SLICE - ) + SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}" - CONSENSUNS_VOTING_PROMPT: ClassVar[str] = PromptTemplate.from_template( - ROLE_PLAYING_SLICE + VOTING_SLICE + TASK_SLICE + SCRATCHPAD_SLICE - ) + def task_execution_with_memory(self) -> str: + return PromptTemplate.from_template( + self._prompts["role_playing"] + + self._prompts["tools"] + + self._prompts["memory"] + + self._prompts["task"] + + self.SCRATCHPAD_SLICE + ) + + def task_execution_without_tools(self) -> str: + return PromptTemplate.from_template( + self._prompts["role_playing"] + + self._prompts["task"] + + self.SCRATCHPAD_SLICE + ) + + def task_execution(self) -> str: + return PromptTemplate.from_template( + self._prompts["role_playing"] + + self._prompts["tools"] + + self._prompts["task"] + + self.SCRATCHPAD_SLICE + ) diff --git a/crewai/prompts/en.json b/crewai/prompts/en.json new file mode 100644 index 0000000000..b3a12d7553 --- /dev/null +++ b/crewai/prompts/en.json @@ -0,0 +1,8 @@ +{ + "slices": { + "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}", + "memory": "This is the summary of your work so far:\n{chat_history}", + "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}", + "tools": "TOOLS:\n------\nYou have access to the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]" + } +} \ No newline at end of file diff --git a/crewai/task.py b/crewai/task.py index 7198e99e27..79818f620f 100644 --- a/crewai/task.py +++ b/crewai/task.py @@ -5,6 +5,7 @@ from pydantic_core import PydanticCustomError from crewai.agent import Agent +from crewai.tasks.task_output import TaskOutput class Task(BaseModel): @@ -19,6 +20,9 @@ class Task(BaseModel): default_factory=list, description="Tools the agent are limited to use for this task.", ) + output: Optional[TaskOutput] = Field( + description="Task output, it's final result.", default=None + ) id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, @@ -46,9 +50,12 @@ def execute(self, context: str = None) -> str: Output of the task. """ if self.agent: - return self.agent.execute_task( + result = self.agent.execute_task( task=self.description, context=context, tools=self.tools ) + + self.output = TaskOutput(description=self.description, result=result) + return result else: raise Exception( f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, either consensual or hierarchical." 
diff --git a/crewai/tasks/task_output.py b/crewai/tasks/task_output.py new file mode 100644 index 0000000000..8a3c521404 --- /dev/null +++ b/crewai/tasks/task_output.py @@ -0,0 +1,17 @@ +from typing import Optional + +from pydantic import BaseModel, Field, model_validator + + +class TaskOutput(BaseModel): + """Class that represents the result of a task.""" + + description: str = Field(description="Description of the task") + summary: Optional[str] = Field(description="Summary of the task", default=None) + result: str = Field(description="Result of the task") + + @model_validator(mode="after") + def set_summary(self): + excerpt = " ".join(self.description.split(" ")[0:10]) + self.summary = f"{excerpt}..." + return self diff --git a/crewai/tools/agent_tools.py b/crewai/tools/agent_tools.py index 90b317ece9..db663c10bb 100644 --- a/crewai/tools/agent_tools.py +++ b/crewai/tools/agent_tools.py @@ -8,7 +8,7 @@ class AgentTools(BaseModel): - """Tools for generic agent.""" + """Default tools around agent delegation""" agents: List[Agent] = Field(description="List of agents in this crew.") @@ -20,12 +20,12 @@ def tools(self): description=dedent( f"""\ Useful to delegate a specific task to one of the - following co-workers: [{', '.join([agent.role for agent in self.agents])}]. - The input to this tool should be a pipe (|) separated text of length - three, representing the co-worker you want to ask it to (one of the options), + following co-workers: [{', '.join([agent.role for agent in self.agents])}]. + The input to this tool should be a pipe (|) separated text of length + three, representing the co-worker you want to ask it to (one of the options), the task and all actual context you have for the task. For example, `coworker|task|context`. - """ + """ ), ), Tool.from_function( @@ -34,12 +34,12 @@ def tools(self): description=dedent( f"""\ Useful to ask a question, opinion or take from on - of the following co-workers: [{', '.join([agent.role for agent in self.agents])}]. 
- The input to this tool should be a pipe (|) separated text of length - three, representing the co-worker you want to ask it to (one of the options), + of the following co-workers: [{', '.join([agent.role for agent in self.agents])}]. + The input to this tool should be a pipe (|) separated text of length + three, representing the co-worker you want to ask it to (one of the options), the question and all actual context you have for the question. For example, `coworker|question|context`. - """ + """ ), ), ] diff --git a/pyproject.toml b/pyproject.toml index 9727e626ef..e177300a16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "crewai" -version = "0.1.16" +version = "0.1.23" description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks." authors = ["Joao Moura "] readme = "README.md" diff --git a/tests/crew_test.py b/tests/crew_test.py index ddbab470b5..5271e2afb8 100644 --- a/tests/crew_test.py +++ b/tests/crew_test.py @@ -180,11 +180,11 @@ def test_crew_verbose_output(capsys): captured = capsys.readouterr() expected_strings = [ "Working Agent: Researcher", - "Starting Task: Research AI advancements. ...", - "Task output:", + "Starting Task: Research AI advancements.", + "[Researcher] Task output:", "Working Agent: Senior Writer", - "Starting Task: Write about AI in healthcare. 
...", - "Task output:", + "Starting Task: Write about AI in healthcare.", + "[Senior Writer] Task output:", ] for expected_string in expected_strings: @@ -205,7 +205,7 @@ def test_crew_verbose_levels_output(capsys): crew.kickoff() captured = capsys.readouterr() - expected_strings = ["Working Agent: Researcher", "Task output:"] + expected_strings = ["Working Agent: Researcher", "[Researcher] Task output:"] for expected_string in expected_strings: assert expected_string in captured.out @@ -216,8 +216,8 @@ def test_crew_verbose_levels_output(capsys): captured = capsys.readouterr() expected_strings = [ "Working Agent: Researcher", - "Starting Task: Write about AI advancements. ...", - "Task output:", + "Starting Task: Write about AI advancements.", + "[Researcher] Task output:", ] for expected_string in expected_strings: