Skip to content

Commit

Permalink
Add docs notebook integration tests (#46)
Browse files Browse the repository at this point in the history
* update notebooks for changed imports

* remove autogen from poetry

* add final_result variable in tests ipynb

* add launch ipynb integration tests

* add cache files for blog with images.ipynb

* add cache files for multi-step research agent.ipynb

* add cache files for math via python code with a single agent.ipynb

* update poetry

* update integration tests cache files

* update cache files for single_llama_index integration test

* update cache files for delegation_crewai integration test

* update cache files for blog_with_images_ipynb integration test

* update cache files for multi_step_research_agent_ipynb integration test

* update cache files for math_via_python_code_with_a_single_agent_ipynb integration test

* remove multi_step_research_agent_ipynb from integration tests

* update cache files for math_via_python_code_with_a_single_agent_ipynb integration test

* update cache files for multi_step_research_agent_ipynb integration test

* rename examples/data/agent_storage to examples/data/research_agent_storage

* fix autogen example

* add cache files for using_autogen_with_motleycrew_ipynb integration test

* update cache files for using_autogen_with_motleycrew_ipynb integration test

* update cache files for using_autogen_with_motleycrew_ipynb integration test

* remove using_autogen_with_motleycrew_ipynb from integration tests

---------

Co-authored-by: User <[email protected]>
Co-authored-by: whimo <[email protected]>
  • Loading branch information
3 people committed Jun 17, 2024
1 parent 25cd4a0 commit e2100ec
Show file tree
Hide file tree
Showing 114 changed files with 261 additions and 450 deletions.
11 changes: 6 additions & 5 deletions examples/Blog with Images.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
"from motleycrew.agents.langchain.react import ReactMotleyAgent\n",
"from motleycrew.agents.llama_index import ReActLlamaIndexMotleyAgent\n",
"from motleycrew.tools.image.dall_e import DallEImageGeneratorTool\n",
"from motleycrew.common.utils import configure_logging\n",
"from motleycrew.common import configure_logging\n",
"from motleycrew.tasks import SimpleTask\n",
"\n",
"configure_logging(verbose=True)\n",
Expand Down Expand Up @@ -283,7 +283,8 @@
"blog_post_task >> illustration_task\n",
"\n",
"# Get your crew to work!\n",
"result = crew.run()"
"result = crew.run()\n",
"final_result = illustration_task.output"
]
},
{
Expand Down Expand Up @@ -349,9 +350,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:.conda-crewai3.11]",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "conda-env-.conda-crewai3.11-py"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
Expand All @@ -363,7 +364,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.8.10"
}
},
"nbformat": 4,
Expand Down
12 changes: 7 additions & 5 deletions examples/Math via python code with a single agent.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@
"from motleycrew import MotleyCrew\n",
"from motleycrew.agents.crewai import CrewAIMotleyAgent\n",
"from motleycrew.tools import PythonREPLTool\n",
"from motleycrew.common.utils import configure_logging\n",
"from motleycrew.common import configure_logging\n",
"from motleycrew.tasks import SimpleTask\n",
"\n",
"configure_logging(verbose=True)\n",
Expand Down Expand Up @@ -781,14 +781,16 @@
"id": "65199e11-e903-470a-942e-855a83ea263e",
"metadata": {},
"outputs": [],
"source": []
"source": [
"final_result = task.output"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:.conda-crewai3.11]",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "conda-env-.conda-crewai3.11-py"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
Expand All @@ -800,7 +802,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.8.10"
}
},
"nbformat": 4,
Expand Down
17 changes: 6 additions & 11 deletions examples/Multi-step research agent.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@
"outputs": [],
"source": [
"DATA_DIR = os.path.realpath(os.path.join(WORKING_DIR, \"mahabharata/text/TinyTales\"))\n",
"PERSIST_DIR = WORKING_DIR / \"storage\""
"PERSIST_DIR = WORKING_DIR / \"examples/data/research_agent_storage\""
]
},
{
Expand Down Expand Up @@ -247,13 +247,7 @@
"2024-05-31 19:11:05,574 - motleycrew_logger - INFO - Available tasks: [QuestionTask(name=QuestionTask, done=False)]\n",
"INFO:motleycrew_logger:Available tasks: [QuestionTask(name=QuestionTask, done=False)]\n",
"2024-05-31 19:11:05,575 - motleycrew_logger - INFO - Processing task: QuestionTask(name=QuestionTask, done=False)\n",
"INFO:motleycrew_logger:Processing task: QuestionTask(name=QuestionTask, done=False)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:motleycrew_logger:Processing task: QuestionTask(name=QuestionTask, done=False)\n",
"2024-05-31 19:11:05,586 - motleycrew_logger - INFO - Loaded unanswered questions: [Question(id=0, question=Why did Arjuna kill Karna, his half-brother?, answer=None, context=None)]\n",
"INFO:motleycrew_logger:Loaded unanswered questions: [Question(id=0, question=Why did Arjuna kill Karna, his half-brother?, answer=None, context=None)]\n",
"2024-05-31 19:11:06,332 - motleycrew_logger - INFO - Most pertinent question according to the tool: question='Why did Arjuna kill Karna, his half-brother?' answer=None context=None\n",
Expand Down Expand Up @@ -593,7 +587,8 @@
"print(\"To explore the graph:\")\n",
"print(f\"docker run -p 8000:8000 -v {crew.graph_store.database_path}:/database --rm kuzudb/explorer:latest\")\n",
"print(\"And in the kuzu explorer at http://localhost:8000 enter\")\n",
"print(\"MATCH (A)-[r]->(B) RETURN *;\")"
"print(\"MATCH (A)-[r]->(B) RETURN *;\")\n",
"final_result = \"{}\\n\\n{}\".format(final_answer.question, final_answer.answer)"
]
},
{
Expand All @@ -613,7 +608,7 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
Expand All @@ -627,7 +622,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.8.10"
}
},
"nbformat": 4,
Expand Down
35 changes: 20 additions & 15 deletions examples/Using AutoGen with motleycrew.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install pyautogen\n",
"import autogen\n",
"import os\n",
"\n",
Expand All @@ -75,7 +76,7 @@
" system_message=\"A human admin.\",\n",
" code_execution_config={\n",
" \"last_n_messages\": 2,\n",
" \"work_dir\": \"groupchat\",\n",
" \"work_dir\": \"examples/data/groupchat\",\n",
" \"use_docker\": False,\n",
" }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
" human_input_mode=\"TERMINATE\",\n",
Expand Down Expand Up @@ -145,10 +146,10 @@
"outputs": [],
"source": [
"from motleycrew import MotleyCrew\n",
"from motleycrew.agents.langchain.openai_tools_react import ReactOpenAIToolsAgent\n",
"from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingAgent\n",
"\n",
"crew = MotleyCrew()\n",
"writer = ReactOpenAIToolsAgent(tools=[knowledge_retrieval_tool])"
"writer = ReActToolCallingAgent(tools=[knowledge_retrieval_tool])"
]
},
{
Expand All @@ -158,18 +159,18 @@
"metadata": {},
"outputs": [],
"source": [
"from motleycrew.tasks import SimpleTaskRecipe\n",
"from motleycrew.tasks import SimpleTask\n",
"\n",
"blog_post_task = SimpleTaskRecipe(\n",
"blog_post_task = SimpleTask(\n",
" crew = crew,\n",
" name=\"Produce blog post on the applications of latest advancements related to GPT-4\",\n",
" description=\"Using the insights provided by searching research papers, develop an engaging blog \"\n",
" \"post that highlights the most significant advancements on GPT-4 ant their applications.\\n\"\n",
" \"Your post should be informative yet accessible, catering to a tech-savvy audience.\\n\"\n",
" \"Make it sound cool, avoid complex words so it doesn't sound like AI. \"\n",
" \"Create a blog post of at least 4 paragraphs.\",\n",
" agent=writer,\n",
" )\n",
"crew.register_task_recipes([blog_post_task])"
" )"
]
},
{
Expand All @@ -182,13 +183,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n",
"WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n",
"WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n"
]
Expand Down Expand Up @@ -738,6 +733,16 @@
"chat_result = user_proxy.initiate_chat(assistant, message=\"What was the first computer?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d833c019",
"metadata": {},
"outputs": [],
"source": [
"final_result = chat_result.summary"
]
},
{
"cell_type": "markdown",
"id": "a62f444f",
Expand All @@ -763,7 +768,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.8.10"
}
},
"nbformat": 4,
Expand Down
44 changes: 44 additions & 0 deletions examples/data/groupchat/fetch_arxiv_gpt4.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# filename: fetch_arxiv_gpt4.py
import urllib.request
import urllib.parse
import xml.etree.ElementTree as ET

def search_arxiv(query, max_results=5):
    """Query the arXiv Atom API and return the raw response body.

    Args:
        query: An arXiv search expression, e.g. 'all:"GPT-4"'.
        max_results: Maximum number of entries to request (default 5,
            matching the original hard-coded value).

    Returns:
        bytes: The raw Atom XML payload returned by arXiv.

    Raises:
        urllib.error.URLError: If the HTTP request fails or times out.
    """
    params = {
        'search_query': query,
        'start': 0,
        'max_results': max_results,
        'sortBy': 'submittedDate',
        'sortOrder': 'descending',
    }
    url = 'http://export.arxiv.org/api/query?' + urllib.parse.urlencode(params)
    # Timeout so a stalled arXiv connection cannot hang the script forever.
    with urllib.request.urlopen(url, timeout=30) as response:
        return response.read()

def parse_response(response):
    """Parse an arXiv Atom feed into a list of paper summaries.

    Args:
        response: Atom XML document as returned by the arXiv API
            (bytes or str).

    Returns:
        list[dict]: One dict per ``<entry>`` with keys 'title',
        'published', and 'summary'. A missing child element yields
        ``None`` for that key instead of raising (the original code
        crashed with ``AttributeError`` on ``.text`` of a missing tag).
    """
    ns = {'atom': 'http://www.w3.org/2005/Atom'}
    root = ET.fromstring(response)
    papers = []
    for entry in root.findall('atom:entry', ns):
        # findtext returns None when the element is absent, so partial
        # entries no longer abort the whole parse.
        papers.append({
            'title': entry.findtext('atom:title', namespaces=ns),
            'published': entry.findtext('atom:published', namespaces=ns),
            'summary': entry.findtext('atom:summary', namespaces=ns),
        })
    return papers

def main():
    """Search arXiv for GPT-4 papers and print the most recent one."""
    results = parse_response(search_arxiv('all:"GPT-4"'))
    if not results:
        print("No papers found.")
        return
    latest = results[0]
    print("Most Recent Paper on GPT-4:")
    print("Title:", latest['title'])
    print("Published Date:", latest['published'])
    print("Summary:", latest['summary'])


if __name__ == '__main__':
    main()
44 changes: 44 additions & 0 deletions examples/data/groupchat/fetch_latest_gpt4_paper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# filename: fetch_latest_gpt4_paper.py
import requests
from datetime import datetime

def fetch_latest_paper():
    """Fetch and print the most recently submitted arXiv paper on GPT-4.

    Queries the arXiv Atom API for the single newest result matching
    ``all:GPT-4`` and prints its title, publication date, and abstract.
    Prints a diagnostic message when no entry is found or the HTTP
    request returns a non-200 status.

    Returns:
        None. Output goes to stdout only.
    """
    # Define the API endpoint
    url = "http://export.arxiv.org/api/query"

    # Set the search parameters to find papers related to GPT-4;
    # max_results=1 so only the newest submission is returned.
    params = {
        "search_query": "all:GPT-4",
        "sortBy": "submittedDate",
        "sortOrder": "descending",
        "max_results": 1
    }

    # Timeout so an unreachable arXiv cannot hang the script forever.
    response = requests.get(url, params=params, timeout=30)

    if response.status_code == 200:
        # Local import kept from the original: only needed on success.
        from xml.etree import ElementTree as ET
        root = ET.fromstring(response.content)

        # Navigate to the (single) entry element.
        entry = root.find('{http://www.w3.org/2005/Atom}entry')
        if entry is not None:
            # Extract title and summary (abstract).
            title = entry.find('{http://www.w3.org/2005/Atom}title').text
            summary = entry.find('{http://www.w3.org/2005/Atom}summary').text
            published_date = entry.find('{http://www.w3.org/2005/Atom}published').text

            # Convert the Atom timestamp to a readable date.
            published_datetime = datetime.strptime(published_date, '%Y-%m-%dT%H:%M:%SZ')

            print("Title:", title)
            print("Published Date:", published_datetime.strftime('%Y-%m-%d'))
            print("Abstract:", summary.strip())
        else:
            print("No GPT-4 papers found.")
    else:
        print("Failed to fetch data from arXiv. Status code:", response.status_code)


# Guarded so importing this module no longer fires a network request
# as a side effect; running it as a script behaves exactly as before.
if __name__ == '__main__':
    fetch_latest_paper()

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions examples/data/research_agent_storage/docstore.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions examples/data/research_agent_storage/graph_store.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"graph_dict": {}}
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"embedding_dict": {}, "text_id_to_ref_doc_id": {}, "metadata_dict": {}}
Loading

0 comments on commit e2100ec

Please sign in to comment.