How to embed a parameter when migrating from AgentExecutor to langgraph #991
I already read the migration tutorial here: https://python.langchain.com/v0.2/docs/how_to/migrate_agent/#basic-usage

This is my current setup:

```python
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", SYSTEM_INIT_PROMPT),
        MessagesPlaceholder("chat_history", optional=True),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ],
)
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=cfg.agent_verbose,
    handle_parsing_errors=True,
)
```

I have this system prompt:

```python
SYSTEM_INIT_PROMPT = """
You are a helpful assistant.
Today is {today}.
"""
```

And when calling the AgentExecutor, I just pass the parameter like this:

```python
now = tz.localize(datetime.now()).strftime("%Y/%m/%d (%A)")
res = agent_executor.invoke(
    {
        "input": last_message.message_content,
        "chat_history": history,
        "today": now,
    },
    {
        "callbacks": callbacks,
        "run_id": run_id,
        "metadata": {
            "thread_id": thread_id,
        },
    },
).get("output")
```

How can I pass the `today` parameter when using langgraph?
@hinthornw
@HoangNguyen689 thanks for flagging this! We're looking into making `create_react_agent` more flexible to allow customizing the graph state (which would enable passing other input keys in your example). In the meantime, you can use the example below to achieve the desired behavior. For additional input keys, just modify the `AgentState`:

```python
from typing import Annotated, Literal, TypedDict

from langchain_core.messages import BaseMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode


# Define the tools for the agent to use
@tool
def search(query: str):
    """Call to surf the web."""
    # This is a placeholder, but don't tell the LLM that...
    if "sf" in query.lower() or "san francisco" in query.lower():
        return ["It's 60 degrees and foggy."]
    return ["It's 90 degrees and sunny."]


tools = [search]
tool_node = ToolNode(tools)
model = ChatOpenAI(model="gpt-3.5-turbo").bind_tools(tools)

SYSTEM_INIT_PROMPT = """
You are a helpful assistant.
Today is {today}.
"""


class AgentState(TypedDict):
    # add any other input variables you need to keep track of here
    today: str
    messages: Annotated[list[BaseMessage], add_messages]


# Define the function that determines whether to continue or not
def should_continue(state: AgentState) -> Literal["tools", END]:
    messages = state["messages"]
    last_message = messages[-1]
    if last_message.tool_calls:
        return "tools"
    return END


def call_model(state: AgentState):
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", SYSTEM_INIT_PROMPT),
            ("placeholder", "{chat_history}"),
        ]
    )
    model_runnable = prompt | model
    # Pull both the custom input key and the message history off the state
    response = model_runnable.invoke(
        {
            "today": state["today"],
            "chat_history": state["messages"],
        }
    )
    # Return a list; add_messages will append it to the existing messages
    return {"messages": [response]}


workflow = StateGraph(AgentState)
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
    "agent",
    should_continue,
)
workflow.add_edge("tools", "agent")

react_agent = workflow.compile()

react_agent.invoke({
    "messages": [("human", "what's the weather in sf? make sure to mention today's date")],
    "today": "July 12, 2024",
})
```
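For parity with the original `AgentExecutor` call: a compiled graph is a `Runnable`, so the same config dict (callbacks, run ID, metadata) can be passed as the second argument to `invoke`. A minimal sketch, assuming the same `callbacks`, `run_id`, `thread_id`, `now`, and `last_message` variables from the question:

```python
res = react_agent.invoke(
    {
        "messages": [("human", last_message.message_content)],
        "today": now,
    },
    {
        "callbacks": callbacks,
        "run_id": run_id,
        "metadata": {"thread_id": thread_id},
    },
)
# The final answer is the content of the last message in the returned state
answer = res["messages"][-1].content
```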
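A hedged sketch for newer langgraph releases (assumption: a version where `create_react_agent` accepts a `state_schema` and a callable `prompt`; in some versions the parameter was named `state_modifier` instead), which would let the extra key flow through the prebuilt agent directly:

```python
from langgraph.prebuilt import create_react_agent
# Subclassing the prebuilt AgentState keeps the required built-in keys
from langgraph.prebuilt.chat_agent_executor import AgentState


class CustomState(AgentState):
    # extra input key carried in the graph state
    today: str


def make_prompt(state: CustomState):
    # Build the system message from the custom state key on each turn
    system = f"You are a helpful assistant.\nToday is {state['today']}."
    return [("system", system)] + state["messages"]


agent = create_react_agent(
    model,
    tools,
    state_schema=CustomState,
    prompt=make_prompt,
)
agent.invoke({
    "messages": [("human", "what's the date today?")],
    "today": "July 12, 2024",
})
```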