Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -32,6 +32,15 @@ from langchain_community.llms import HuggingFaceHub
|
|
32 |
from langchain_community.chat_models import ChatHuggingFace
|
33 |
|
34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
# ====== Tool Definitions ======
|
36 |
@tool
|
37 |
def duckduckgo_search(query: str) -> str:
|
@@ -272,27 +281,29 @@ class BasicAgent:
|
|
272 |
def __call__(self, question: str) -> str:
|
273 |
print(f"Agent received question: {question[:50]}{'...' if len(question) > 50 else ''}")
|
274 |
|
275 |
-
#
|
276 |
-
state =
|
277 |
-
|
278 |
-
|
|
|
|
|
279 |
final_state = self.workflow.invoke(state)
|
280 |
-
|
281 |
-
# Debug: Print the final state structure
|
282 |
print(f"Final state keys: {list(final_state.keys())}")
|
283 |
if 'history' in final_state:
|
284 |
print(f"History length: {len(final_state['history'])}")
|
285 |
for i, msg in enumerate(final_state['history']):
|
286 |
print(f"Message {i}: {type(msg).__name__} - {msg.content[:100]}...")
|
287 |
-
|
288 |
-
# Extract final answer from history
|
289 |
for msg in reversed(final_state['history']):
|
290 |
if isinstance(msg, AIMessage) and "FINAL ANSWER:" in msg.content:
|
291 |
-
# Extract and clean the final answer
|
292 |
answer = msg.content.split("FINAL ANSWER:")[1].strip()
|
293 |
print(f"Agent returning answer: {answer}")
|
294 |
return answer
|
295 |
|
|
|
|
|
|
|
296 |
|
297 |
|
298 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
|
32 |
from langchain_community.chat_models import ChatHuggingFace
|
33 |
|
34 |
|
35 |
+
from langchain.schema import HumanMessage # Or your framework's equivalent
|
36 |
+
|
37 |
+
def init_state(question: str) -> dict:
    """Build the initial workflow state for a single question.

    The returned dict carries the raw question string plus a message
    history seeded with that question wrapped as a HumanMessage, which
    is the shape the agent workflow expects on invocation.
    """
    seed_history = [HumanMessage(content=question)]
    return {"question": question, "history": seed_history}
|
42 |
+
|
43 |
+
|
44 |
# ====== Tool Definitions ======
|
45 |
@tool
|
46 |
def duckduckgo_search(query: str) -> str:
|
|
|
281 |
def __call__(self, question: str) -> str:
    """Run the agent workflow on *question* and return its final answer.

    The graph state is seeded with the question (both raw and wrapped as
    a HumanMessage), the workflow is invoked, and the resulting message
    history is scanned newest-first for an AIMessage containing the
    "FINAL ANSWER:" marker.

    Raises:
        ValueError: if no AIMessage in the history carries a final answer.
    """
    print(f"Agent received question: {question[:50]}{'...' if len(question) > 50 else ''}")

    # Ensure proper HumanMessage in history: the workflow expects the
    # question both as a raw string and as the first history message.
    state = {
        "question": question,
        "history": [HumanMessage(content=question)],
    }

    final_state = self.workflow.invoke(state)

    # Debug: print the final state structure.
    print(f"Final state keys: {list(final_state.keys())}")
    # Fetch the history once with a default: the original guarded the debug
    # prints with `if 'history' in final_state` but then indexed
    # final_state['history'] unconditionally below, so a workflow that
    # returned no history raised KeyError instead of the intended ValueError.
    history = final_state.get('history', [])
    if 'history' in final_state:
        print(f"History length: {len(history)}")
        for i, msg in enumerate(history):
            print(f"Message {i}: {type(msg).__name__} - {msg.content[:100]}...")

    # Extract the final answer: newest message wins, so scan backwards.
    for msg in reversed(history):
        if isinstance(msg, AIMessage) and "FINAL ANSWER:" in msg.content:
            # maxsplit=1 keeps everything after the first marker intact,
            # even if "FINAL ANSWER:" happens to recur inside the answer.
            answer = msg.content.split("FINAL ANSWER:", 1)[1].strip()
            print(f"Agent returning answer: {answer}")
            return answer

    raise ValueError("No FINAL ANSWER found in agent history.")
|
305 |
+
|
306 |
+
|
307 |
|
308 |
|
309 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|