Update app.py
app.py CHANGED
@@ -56,61 +56,52 @@ compiled_graph = graph.compile()
 
 # 3) The corrected respond_to_input:
 def respond_to_input(user_input: str) -> str:
-    """
-    We place only a SystemMessage in state["messages"], and pass the actual
-    user_input string as the second argument to invoke().
-    """
-    # (A) First message: describe your tools
+    # 1) The tightened system prompt
     system_msg = SystemMessage(
-        # … (previous prompt text, 14 lines, omitted)
+        content=(
+            "You are an assistant with access to exactly these tools:\n"
+            " 1) web_search(query:str)\n"
+            " 2) parse_excel(path:str,sheet_name:str)\n"
+            " 3) ocr_image(path:str)\n\n"
+            "⚠️ **IMPORTANT** ⚠️: If (and only if) you need to call one of these tools, "
+            "output exactly one JSON object and nothing else. For example:\n"
+            "```json\n"
+            '{"tool":"web_search","query":"Mercedes Sosa albums 2000-2009"}\n'
+            "```\n"
+            "That JSON must start at the very first character of your response and end at the very last character—"
+            "no quotes, no code fences, no extra explanation.\n\n"
+            "If you do NOT need any tool, simply reply with your final answer as plain text (no JSON)."
+        )
     )
 
-    #
+    # 2) Kick off the graph with only that system prompt
     initial_state = { "messages": [system_msg] }
-
-    # (C) Now invoke, passing user_input separately:
     final_state = compiled_graph.invoke(initial_state, user_input)
 
-    #
+    # 3) Find the last AIMessage the LLM returned
     last_msg = None
-    for msg in final_state["messages"]:
+    for msg in reversed(final_state["messages"]):
         if isinstance(msg, AIMessage):
             last_msg = msg.content
             break
 
-    # Try to parse
+    # 4) Try to parse that as a tool-call dict
     tool_dict = parse_tool_json(last_msg or "")
     if tool_dict is not None:
-        #
+        # 4.a) If it's valid, run the tool
         result = tool_node.run(tool_dict)
-        #
+        # 4.b) Feed the tool's output back into the agent a second time
         new_state = {
-            "messages": [
-                *final_state["messages"],
-                AIMessage(content=result)
-            ]
+            "messages": [*final_state["messages"], AIMessage(content=result)]
         }
         second_pass = compiled_graph.invoke(new_state, "")
-        #
-        for msg in second_pass["messages"]:
+        # 4.c) Return the last AIMessage from that second pass
+        for msg in reversed(second_pass["messages"]):
             if isinstance(msg, AIMessage):
                 return msg.content
         return ""
     else:
-        #
+        # 5) If it wasn't valid JSON → just return the plain last_msg
         return last_msg or ""
 
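The whole flow above hinges on `parse_tool_json` turning the model's reply into a tool-call dict; that helper lives elsewhere in app.py and is not part of this hunk. A minimal sketch of what such a helper could look like, given the prompt's "one bare JSON object with a `tool` key, nothing else" contract — the fence-stripping fallback here is an assumption, not necessarily the Space's actual code:

```python
import json
from typing import Optional

def parse_tool_json(text: str) -> Optional[dict]:
    """Return a tool-call dict if `text` is exactly one JSON object with a "tool" key, else None."""
    text = text.strip()
    # The prompt forbids code fences, but tolerate them in case the model adds them anyway.
    if text.startswith("```"):
        text = text.strip("`").strip()
        if text.lower().startswith("json"):
            text = text[4:].strip()
    try:
        obj = json.loads(text)
    except json.JSONDecodeError:
        return None
    return obj if isinstance(obj, dict) and "tool" in obj else None
```

With a helper of that shape, `respond_to_input(...)` either returns the model's plain-text answer directly or runs exactly one tool call and a second graph pass before answering, as in the diff above.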