# state.py
import os
from typing import Annotated

from langchain_community.tools import DuckDuckGoSearchRun
from langchain_core.messages import AIMessage, ToolMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
from typing_extensions import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition


class State(TypedDict):
    # Conversation history; the add_messages reducer appends rather than overwrites.
    messages: Annotated[list, add_messages]
    # Not read or written by any node in this graph.
    input_agent_response: Annotated[list, add_messages]
    # Set by the chatbot node when the model asks to escalate to a human.
    # Declaring it here is required: nodes write to this key, and LangGraph
    # rejects updates to channels missing from the state schema.
    ask_human: bool


class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert.

    Use this if you are unable to assist directly or if the user requires
    support beyond your permissions. To use this function, relay the user's
    'request' so the expert can provide the right guidance.
    """

    request: str


tool = DuckDuckGoSearchRun()
tools = [tool]
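# A quick smoke test of the search tool, if you want to verify it in isolation
# (commented out: it needs the duckduckgo-search package and network access):
# print(tool.invoke("What is LangGraph?"))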
# Read the key from the environment instead of hardcoding a secret in source.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
    base_url="https://api2.aigcbest.top/v1",
    api_key=os.environ["OPENAI_API_KEY"],
)
# We can bind the llm to a tool definition, a pydantic model, or a JSON schema.
llm_with_tools = llm.bind_tools(tools + [RequestAssistance])


def chatbot(state: State):
    response = llm_with_tools.invoke(state["messages"])
    # If the model "called" the RequestAssistance schema, don't execute anything;
    # just flag the turn so the graph routes to the human node.
    ask_human = False
    if (
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    ):
        ask_human = True
    return {"messages": [response], "ask_human": ask_human}


graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
# ToolNode runs any real tool calls the model makes (here, just the web search).
graph_builder.add_node("tools", ToolNode(tools=[tool]))


def create_response(response: str, ai_message: AIMessage):
    """Wrap a human-written reply as the ToolMessage the model is waiting for."""
    return ToolMessage(
        content=response,
        tool_call_id=ai_message.tool_calls[0]["id"],
    )


def human_node(state: State):
    new_messages = []
    if not isinstance(state["messages"][-1], ToolMessage):
        # Typically, the user will have updated the state during the interrupt.
        # If they choose not to, we include a placeholder ToolMessage so the
        # LLM can continue.
        new_messages.append(
            create_response("No response from human.", state["messages"][-1])
        )
    return {
        # Append the new messages.
        "messages": new_messages,
        # Unset the flag.
        "ask_human": False,
    }


graph_builder.add_node("human", human_node)


def select_next_node(state: State):
    if state["ask_human"]:
        return "human"
    # Otherwise, route as before: to "tools" for a real tool call, or to the
    # end if the model answered directly.
    return tools_condition(state)


graph_builder.add_conditional_edges(
    "chatbot",
    select_next_node,
    {"human": "human", "tools": "tools", "__end__": "__end__"},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.set_entry_point("chatbot")

memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    # Pause execution before the "human" node so a person can inject a reply
    # into the checkpointed state before the graph resumes.
    interrupt_before=["human"],
)
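
# Optional sanity check of the wiring. A sketch, assuming a langgraph version
# where the compiled graph exposes get_graph().draw_mermaid(); skip otherwise.
# print(graph.get_graph().draw_mermaid())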

config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {
        "messages": [
            ("user", "I'm learning LangGraph. Could you do some research on it for me?")
        ]
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()