naman1102 commited on
Commit
e89e29d
·
1 Parent(s): 55affd7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -80,8 +80,8 @@ def tool_node(state: AgentState, tool_request: dict) -> AgentState:
80
  # 3.a) Run the actual ToolNode on that dict:
81
  result_text = underlying_tool_node.run(tool_request)
82
 
83
- # 3.b) Update state.messages to note the tool’s output,
84
- # and clear tool_request so we don’t loop.
85
  return {
86
  "messages": [f"TOOL ({tool_request['tool']}): {result_text}"],
87
  "tool_request": None,
@@ -93,13 +93,13 @@ graph = StateGraph(AgentState)
93
  graph.add_node("agent", agent_node)
94
  graph.add_node("tools", tool_node)
95
 
96
- # 5) Simple START → “agent” edge (no third argument needed)
97
  graph.add_edge(START, "agent")
98
 
99
- # 6) Simple “tools” → “agent” edge (again, no third argument)
100
  graph.add_edge("tools", "agent")
101
 
102
- # 7) Conditional branching out of “agent,” exactly like the tutorial
103
  def route_agent(state: AgentState, agent_out):
104
  """
105
  When the LLM (agent_node) runs, it returns an AgentState where
@@ -130,8 +130,8 @@ compiled_graph = graph.compile()
130
  # 9) Define respond_to_input so that Gradio (and the Hugging Face submission) can call it
131
  def respond_to_input(user_input: str) -> str:
132
  initial_state: AgentState = {"messages": [], "tool_request": None, "tool_result": None}
133
- # βœ”οΈ In v0.4.7 (and 0.3.x+), you must use .run():
134
- final_state = compiled_graph.run(initial_state, user_input)
135
  # Return the last assistant message
136
  last = final_state["messages"][-1]
137
  return last.replace("ASSISTANT: ", "")
 
80
  # 3.a) Run the actual ToolNode on that dict:
81
  result_text = underlying_tool_node.run(tool_request)
82
 
83
+ # 3.b) Update state.messages to note the tool's output,
84
+ # and clear tool_request so we don't loop.
85
  return {
86
  "messages": [f"TOOL ({tool_request['tool']}): {result_text}"],
87
  "tool_request": None,
 
93
  graph.add_node("agent", agent_node)
94
  graph.add_node("tools", tool_node)
95
 
96
+ # 5) Simple START → "agent" edge (no third argument needed)
97
  graph.add_edge(START, "agent")
98
 
99
+ # 6) Simple "tools" → "agent" edge (again, no third argument)
100
  graph.add_edge("tools", "agent")
101
 
102
+ # 7) Conditional branching out of "agent," exactly like the tutorial
103
  def route_agent(state: AgentState, agent_out):
104
  """
105
  When the LLM (agent_node) runs, it returns an AgentState where
 
130
  # 9) Define respond_to_input so that Gradio (and the Hugging Face submission) can call it
131
  def respond_to_input(user_input: str) -> str:
132
  initial_state: AgentState = {"messages": [], "tool_request": None, "tool_result": None}
133
+ # βœ”οΈ use .invoke() in v0.4.7
134
+ final_state = compiled_graph.invoke(initial_state, user_input)
135
  # Return the last assistant message
136
  last = final_state["messages"][-1]
137
  return last.replace("ASSISTANT: ", "")