Update app.py
app.py CHANGED
@@ -42,51 +42,44 @@ tool_node = ToolNode([ocr_image, parse_excel, web_search])
 graph = StateGraph(AgentState)

 # ────────────────────────
-
-
+graph.add_node("agent", llm)
+graph.add_node("tools", tool_node)
+# Edge A: START → "agent"
+# Wrap the user_input into state["messages"]
 graph.add_edge(
     START,
-
-    lambda state, user_input: {"messages": [user_input]},
+    "agent",
+    transition=lambda state, user_input: {"messages": [user_input]},
 )

-#
-#
-# Only fire when the LLM returns a dict with exactly "tool":"ocr_image" or "tool":"parse_excel"
-# The lambda must return that dict so the ToolNode can extract its arguments.
-def route_to_tool(state: AgentState, llm_out):
-    # Expecting llm_out to be a dict like:
-    # {"tool": "ocr_image", "path": "invoice.png"}
-    # or {"tool": "parse_excel", "path":"sales.xlsx", "sheet_name":"Sheet1"}
-    if isinstance(llm_out, dict) and llm_out.get("tool") in {"ocr_image", "parse_excel", "web_search"}:
-        return llm_out
-    return None  # ← do not invoke the tool if it's not matching
-
+# Edge C: "tools" → "agent"
+# Whatever string the tool returns becomes the next prompt to the LLM
 graph.add_edge(
-
-
-
+    "tools",
+    "agent",
+    transition=lambda state, tool_output: tool_output,
 )

-#
-# Edge 3: ToolNode → LLM
-# Whatever the tool returns (a string), feed that straight back into the LLM as the next turn.
-graph.add_edge(
-    tool_node,
-    llm,
-    lambda state, tool_out: tool_out
-)
+# 7) Use add_conditional_edges out of "agent" instead of two separate edges

-
-
-
-
-
-
-
+def route_agent(state: AgentState, agent_out):
+    """
+    Return "tools" if the LLM output is a dict with a valid "tool" key,
+    otherwise return "final".
+    """
+    if isinstance(agent_out, dict) and agent_out.get("tool") in {"ocr_image", "parse_excel"}:
+        return "tools"
+    return "final"
+
+graph.add_conditional_edges(
+    "agent",
+    route_agent,
+    {
+        "tools": "tools",  # if route_agent(...) == "tools", go to the "tools" node
+        "final": END,      # if route_agent(...) == "final", transition to END
+    },
 )

-
 def respond_to_input(user_input: str) -> str:
     initial_state: AgentState = {"messages": []}
     return graph.run(initial_state, user_input)
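For context, a minimal runnable sketch of the same agent → tools → agent loop is shown below, written against the released langgraph package. This is an assumption about the API the Space targets: the public StateGraph.add_edge takes only node names, so the transition= callbacks and graph.run(...) in the diff are expressed here as node functions plus compile() and invoke(). The fake_llm and run_tool functions are hypothetical stand-ins for the Space's real llm and tool_node, and route_agent reads the last message out of the state rather than receiving the node output as a second argument.

# Minimal sketch, assuming the public langgraph API (StateGraph, add_node,
# add_conditional_edges, compile, invoke). fake_llm and run_tool are
# hypothetical stand-ins for the Space's real llm and tool_node.
from typing import Any, List, TypedDict

from langgraph.graph import END, START, StateGraph


class AgentState(TypedDict):
    messages: List[Any]


def fake_llm(state: AgentState) -> dict:
    # Stand-in LLM node: request the ocr_image tool for *.png inputs,
    # otherwise produce a plain answer.
    last = state["messages"][-1]
    if isinstance(last, str) and last.endswith(".png"):
        step = {"tool": "ocr_image", "path": last}
    else:
        step = {"tool": None, "answer": f"final answer about: {last}"}
    return {"messages": state["messages"] + [step]}


def run_tool(state: AgentState) -> dict:
    # Stand-in tool node: "execute" the requested tool and append its
    # string output so it becomes the next prompt to the agent node.
    call = state["messages"][-1]
    output = f"<{call['tool']} result for {call.get('path')}>"
    return {"messages": state["messages"] + [output]}


def route_agent(state: AgentState) -> str:
    # Same routing idea as in the commit: go to "tools" when the LLM asked
    # for a known tool, otherwise finish.
    last = state["messages"][-1]
    if isinstance(last, dict) and last.get("tool") in {"ocr_image", "parse_excel"}:
        return "tools"
    return "final"


graph = StateGraph(AgentState)
graph.add_node("agent", fake_llm)
graph.add_node("tools", run_tool)
graph.add_edge(START, "agent")
graph.add_conditional_edges("agent", route_agent, {"tools": "tools", "final": END})
graph.add_edge("tools", "agent")
app = graph.compile()


def respond_to_input(user_input: str) -> str:
    final_state = app.invoke({"messages": [user_input]})
    return str(final_state["messages"][-1])


print(respond_to_input("invoice.png"))

With this wiring the "tools" → "agent" edge plays the role of Edge C above, and the single conditional edge out of "agent" replaces the two fixed edges that the removed route_to_tool helper was trying to emulate.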