Parallel tool use doesn't work #1101
-
I tried to create a LangGraph with parallel branches implementing the following algorithm.
Unfortunately, it crashes with an error right before aggregation: it seems that a tool call with a given id never receives its matching tool response, apparently because the parallel branches' messages are merged into one shared list. Any idea how to fix this? It seems like a bug — something in the framework. Code repro:
import functools
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.runnables.graph import MermaidDrawMethod
from langchain_core.tools import Tool, tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessageGraph
from langgraph.prebuilt.tool_node import ToolNode
def _should_continue(messages, next_node: str, process_tool_node: str):
last_message = messages[-1]
# If there is no function call, then we finish
if not last_message.tool_calls:
return next_node
return process_tool_node
def get_model():
    """Construct the chat model shared by all branches.

    NOTE(review): the API key is a hard-coded placeholder — presumably
    replaced by the reader; confirm before running.
    """
    model_name = "gpt-4o"
    return ChatOpenAI(api_key="XXX", model=model_name)
def handle_task(
    messages: list[BaseMessage],
    task_description: str,
    tools: list[Tool],
) -> BaseMessage:
    """Run one LLM turn for a branch.

    Parameters
    ----------
    messages : list[BaseMessage]
        The conversation so far for this branch.
    task_description : str
        Branch-specific instructions injected into the system prompt.
    tools : list[Tool]
        Tools the model is allowed to call on this turn.

    Returns
    -------
    BaseMessage
        The model's reply (may contain tool calls).
    """
    # Bind the branch's tools so the model can emit tool calls for them.
    model = get_model().bind_tools(tools=tools)
    # BUG FIX: the original template never interpolated {task_description},
    # so the partial variable was silently unused and every branch received
    # the same generic system prompt.
    chain = (
        ChatPromptTemplate.from_messages(
            [
                SystemMessagePromptTemplate.from_template(
                    template=(
                        "You are a helpful assistant. You have a list of"
                        " tools and try to help with the task."
                        " {task_description}"
                    ),
                    partial_variables={
                        "task_description": task_description,
                    },
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )
        | model
    )
    return chain.invoke({"messages": messages})
# Build the message-passing graph; START fans out into both branches,
# which therefore run in parallel on the same message list.
graph = MessageGraph()


def _fan_out(_state):
    """Always schedule both branches."""
    return ["branch_a", "branch_b"]


graph.add_conditional_edges(START, _fan_out)
@tool
def number_of_hungry_children() -> int:
    """
    Returns the number of hungry children in the world.

    Returns
    -------
    int
        The number of hungry children.
    """
    # BUG FIX: 1e7 is a float; the annotation and docstring promise an int.
    return 10_000_000
branch_name = "branch_a"
branch_tools = [number_of_hungry_children]
# LLM node for this branch: answers hungry-children questions with its tool.
graph.add_node(
    branch_name,
    functools.partial(
        handle_task,
        task_description="You respond to questions about number of hungry children.",
        tools=branch_tools,
    ),
)
# IDIOM FIX: the original wrapped a functools.partial in a lambda that
# immediately invoked it; passing the partial directly routes identically.
graph.add_conditional_edges(
    branch_name,
    functools.partial(
        _should_continue,
        next_node=f"{branch_name}-final",
        process_tool_node=f"{branch_name}-action",
    ),
)
graph.add_node(f"{branch_name}-action", ToolNode(branch_tools, name=f"{branch_name}-tools"))  # call the tool
graph.add_edge(f"{branch_name}-action", branch_name)  # go back to the LLM so it post-processes the answer
graph.add_node(f"{branch_name}-final", lambda messages: messages)  # identity pass-through
graph.add_edge(f"{branch_name}-final", "aggregate")
@tool
def thirsty_children_count() -> int:
    """
    Returns the number of thirsty children in the world.

    Returns
    -------
    int
        The number of thirsty children.
    """
    # BUG FIX: 1e6 is a float; the annotation and docstring promise an int.
    return 1_000_000
branch_name = "branch_b"
branch_tools = [thirsty_children_count]
# LLM node for this branch: answers thirsty-children questions with its tool.
graph.add_node(
    branch_name,
    functools.partial(
        handle_task,
        task_description="You respond to questions about thirsty children.",
        tools=branch_tools,
    ),
)
# IDIOM FIX: the original wrapped a functools.partial in a lambda that
# immediately invoked it; passing the partial directly routes identically.
graph.add_conditional_edges(
    branch_name,
    functools.partial(
        _should_continue,
        next_node=f"{branch_name}-final",
        process_tool_node=f"{branch_name}-action",
    ),
)
graph.add_node(f"{branch_name}-action", ToolNode(branch_tools, name=f"{branch_name}-tools"))  # call the tool
graph.add_edge(f"{branch_name}-action", branch_name)  # go back to the LLM so it post-processes the answer
graph.add_node(f"{branch_name}-final", lambda messages: messages)  # identity pass-through
graph.add_edge(f"{branch_name}-final", "aggregate")
# Fan-in: both branch finals feed this identity node before END.
graph.add_node("aggregate", lambda msgs: msgs)
graph.add_edge("aggregate", END)
graph = graph.compile()
# Render the topology to graph.png via the Mermaid web API (network call).
graph.get_graph().draw_mermaid_png(
    draw_method=MermaidDrawMethod.API,
    output_file_path="graph.png",
)
result = graph.invoke("How many hungry and thirsty children are there?")
print(result)

Versions:
|
Beta Was this translation helpful? Give feedback.
Replies: 1 comment, 3 replies
-
Will address pt 2 first since it's easy: for the "graph rendering looks funny" bit, it's because there are only 2 ways we can infer conditional-edge connectivity at "compile" time:
|
Beta Was this translation helpful? Give feedback.