From 06fec4868b928117e95e2a405ccc172278dec8ae Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Wed, 11 Oct 2023 17:06:01 -0700
Subject: [PATCH] add agent examples (#33)

---
 .github/workflows/langserve_ci.yml           |   1 +
 examples/agent/client.ipynb                  | 105 ++++++++++++++++++
 examples/agent/server.py                     |  82 ++++++++++++++
 .../client.ipynb                             |   6 +-
 .../conversational_retrieval_chain/server.py |   4 +-
 5 files changed, 194 insertions(+), 4 deletions(-)
 create mode 100644 examples/agent/client.ipynb
 create mode 100755 examples/agent/server.py

diff --git a/.github/workflows/langserve_ci.yml b/.github/workflows/langserve_ci.yml
index d211a8ed..7a045e93 100644
--- a/.github/workflows/langserve_ci.yml
+++ b/.github/workflows/langserve_ci.yml
@@ -11,6 +11,7 @@ on:
       - '.github/workflows/_test.yml'
       - '.github/workflows/langserve_ci.yml'
       - 'langserve/**'
+      - 'examples/**'
       - 'pyproject.toml'
       - 'Makefile'
   workflow_dispatch:  # Allows to trigger the workflow manually in GitHub UI
diff --git a/examples/agent/client.ipynb b/examples/agent/client.ipynb
new file mode 100644
index 00000000..79b4ad2f
--- /dev/null
+++ b/examples/agent/client.ipynb
@@ -0,0 +1,105 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Client\n",
+    "\n",
+    "Demo of a client interacting with a remote agent. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "from langserve import RemoteRunnable\n",
+    "\n",
+    "remote_runnable = RemoteRunnable(\"http://localhost:8000/\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A remote runnable has the same interface as a local runnable."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'output': 'Hello! How can I assist you today?'}"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "await remote_runnable.ainvoke({\"input\": \"hi!\"})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'output': 'Eugene thinks that cats like fish.'}"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "remote_runnable.invoke({\"input\": \"what does eugene think of cats?\"})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/examples/agent/server.py b/examples/agent/server.py
new file mode 100755
index 00000000..cbbc0ec5
--- /dev/null
+++ b/examples/agent/server.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+"""Example LangChain server that exposes an agent."""
+from fastapi import FastAPI
+from langchain.agents import AgentExecutor, tool
+from langchain.agents.format_scratchpad import format_to_openai_functions
+from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
+from langchain.chat_models import ChatOpenAI
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain.tools.render import format_tool_to_openai_function
+from langchain.vectorstores import FAISS
+from pydantic import BaseModel
+
+from langserve import add_routes
+
+vectorstore = FAISS.from_texts(
+    ["cats like fish", "dogs like sticks"], embedding=OpenAIEmbeddings()
+)
+retriever = vectorstore.as_retriever()
+
+
+@tool
+def get_eugene_thoughts(query: str) -> list:
+    """Returns Eugene's thoughts on a topic."""
+    return retriever.get_relevant_documents(query)
+
+
+tools = [get_eugene_thoughts]
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", "You are a helpful assistant."),
+        ("user", "{input}"),
+        MessagesPlaceholder(variable_name="agent_scratchpad"),
+    ]
+)
+
+llm = ChatOpenAI()
+
+llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
+
+agent = (
+    {
+        "input": lambda x: x["input"],
+        "agent_scratchpad": lambda x: format_to_openai_functions(
+            x["intermediate_steps"]
+        ),
+    }
+    | prompt
+    | llm_with_tools
+    | OpenAIFunctionsAgentOutputParser()
+)
+
+agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+app = FastAPI(
+    title="LangChain Server",
+    version="1.0",
+    description="Spin up a simple API server using LangChain's Runnable interfaces",
+)
+
+
+# We need to add these input/output schemas because the current AgentExecutor
+# is lacking in schemas.
+class Input(BaseModel):
+    input: str
+
+
+class Output(BaseModel):
+    output: str
+
+
+# Adds routes to the app for using the agent under:
+# /invoke
+# /batch
+# /stream
+add_routes(app, agent_executor, input_type=Input, output_type=Output)
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="localhost", port=8000)
diff --git a/examples/conversational_retrieval_chain/client.ipynb b/examples/conversational_retrieval_chain/client.ipynb
index 85f88c56..072f14c1 100644
--- a/examples/conversational_retrieval_chain/client.ipynb
+++ b/examples/conversational_retrieval_chain/client.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# Client\n",
     "\n",
-    "Demo of a client interacting with a remote retriever. "
+    "Demo of a client interacting with a remote conversational retrieval chain. "
    ]
   },
   {
@@ -70,7 +70,9 @@
     }
    ],
    "source": [
-    "await remote_runnable.ainvoke({\"question\": \"what do cats like?\", \"chat_history\": [(\"hi\", \"hi\")]})"
+    "await remote_runnable.ainvoke(\n",
+    "    {\"question\": \"what do cats like?\", \"chat_history\": [(\"hi\", \"hi\")]}\n",
+    ")"
    ]
   },
   {
diff --git a/examples/conversational_retrieval_chain/server.py b/examples/conversational_retrieval_chain/server.py
index 0a166bfc..cf56dfca 100755
--- a/examples/conversational_retrieval_chain/server.py
+++ b/examples/conversational_retrieval_chain/server.py
@@ -1,10 +1,10 @@
 #!/usr/bin/env python
 """Example LangChain server exposes a conversational retrieval chain."""
 from fastapi import FastAPI
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.vectorstores import FAISS
 from langchain.chains import ConversationalRetrievalChain
 from langchain.chat_models import ChatOpenAI
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.vectorstores import FAISS
 
 from langserve import add_routes
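For readers trying the patch out, here is a minimal client sketch (not part of the diff above). It exercises the /invoke and /stream routes that add_routes mounts for the agent server, and assumes examples/agent/server.py is running at http://localhost:8000. Since RemoteRunnable implements the standard Runnable interface, ainvoke and astream map onto those endpoints.

#!/usr/bin/env python
# Hypothetical companion script, not included in this patch. It assumes the
# agent server from examples/agent/server.py is running at
# http://localhost:8000.
import asyncio

from langserve import RemoteRunnable

remote_runnable = RemoteRunnable("http://localhost:8000/")


async def main() -> None:
    # Calls POST /invoke; the payload must match the Input schema ({"input": str}).
    result = await remote_runnable.ainvoke(
        {"input": "what does eugene think of cats?"}
    )
    # The response matches the Output schema, e.g. {'output': '...'}.
    print(result)

    # Calls POST /stream; chunks arrive as they are produced (an AgentExecutor
    # may emit a single final chunk rather than token-level output).
    async for chunk in remote_runnable.astream({"input": "hi!"}):
        print(chunk)


if __name__ == "__main__":
    asyncio.run(main())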