Update app.py

app.py CHANGED
@@ -135,13 +135,11 @@ class DocumentQATool(BaseTool):
 
 class PythonExecutionTool(BaseTool):
     name: str = "python_execution"
-    description: str = "Executes Python code for complex calculations, data manipulation, or logical operations. Always assign the final result to a variable named '_result_value'."
+    description: str = "Executes Python code for complex calculations, data manipulation, or logical operations. Always assign the final result to a variable named '_result_value'."
     def _run(self, code: str) -> str:
         print(f"DEBUG: Executing python_execution with code: {code}")
         try:
             local_vars = {}
-            # It's generally unsafe to use `exec` with arbitrary user input due to security risks.
-            # For a real application, consider a sandboxed environment or a more restricted approach.
             exec(code, globals(), local_vars)
             if '_result_value' in local_vars:
                 return str(local_vars['_result_value'])
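The deleted comments flagged the real issue: `exec` still runs model-generated code with full access to the process, so removing the warning does not remove the risk. As a sketch of one mitigation (a hypothetical `run_restricted` helper, not part of this commit), the globals handed to `exec` can be narrowed; a real sandbox would still need process isolation, resource limits, and timeouts:

```python
# Hypothetical sketch, not from this commit: limit what exec() can reach by
# handing it a curated global namespace instead of the module's globals().
import math

_ALLOWED_GLOBALS = {
    "__builtins__": {"abs": abs, "len": len, "min": min, "max": max,
                     "sum": sum, "range": range, "round": round},
    "math": math,
}

def run_restricted(code: str) -> str:
    local_vars: dict = {}
    # Still not a true sandbox: exec() runs arbitrary Python in-process.
    exec(code, dict(_ALLOWED_GLOBALS), local_vars)
    return str(local_vars.get("_result_value", "No '_result_value' was set."))
```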
@@ -289,23 +287,29 @@ def reasoning_node(state: AgentState) -> AgentState:
     # --- Defensive checks at the start of the node ---
     if state is None:
         raise ValueError("reasoning_node received a None state object.")
-    if state.get("history") is None:
-        print("WARNING: 'history' is None on entry to reasoning_node. Re-initializing to empty list.")
-        state["history"] = []
-    if state.get("context") is None:
-        print("WARNING: 'context' is None on entry to reasoning_node. Re-initializing to empty dict.")
+
+    # Ensure context is a dictionary
+    if not isinstance(state.get("context"), dict):
+        print("WARNING: state['context'] is not a dictionary on entry to reasoning_node. Re-initializing to empty dict.")
         state["context"] = {}
-
-
-
-
+
+    # Ensure history is a list
+    if not isinstance(state.get("history"), list):
+        print("WARNING: state['history'] is not a list on entry to reasoning_node. Re-initializing to empty list.")
+        state["history"] = []
+
+    # Ensure tools is a list
+    if not isinstance(state.get("tools"), list):
+        print("WARNING: state['tools'] is not a list on entry to reasoning_node. This might cause issues downstream.")
+        # If tools become None or corrupted, the tool_descriptions part will fail.
+        # It's better to log and proceed, assuming agent init sets them correctly.
 
     print(f"DEBUG: Entering reasoning_node. Iteration: {state['iterations']}")
     # Use .get() for safety when printing history length
     print(f"DEBUG: Current history length: {len(state.get('history', []))}")
 
     # Set defaults for state components that might be missing, although TypedDict implies presence
-    state.setdefault("context", {})
+    state.setdefault("context", {})  # Redundant if check above re-initializes, but harmless
     state.setdefault("reasoning", "")
     state.setdefault("iterations", 0)
     state.setdefault("current_task", "Understand the question and plan the next step.")
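These checks touch most of the keys on `AgentState`. The TypedDict itself is defined elsewhere in app.py and never appears in this diff; the shape implied by the keys used here is roughly the following (field names from the diff, types inferred, so treat it as an assumption):

```python
from typing import Any, Dict, List, Optional, TypedDict

from langchain_core.messages import BaseMessage
from langchain_core.tools import BaseTool

class AgentState(TypedDict):
    question: str
    history: List[BaseMessage]
    context: Dict[str, Any]  # scratch space: "pending_action", "vector_store", ...
    reasoning: str
    iterations: int
    current_task: str
    current_thoughts: str
    final_answer: Optional[str]
    tools: List[BaseTool]
```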
@@ -317,12 +321,8 @@ def reasoning_node(state: AgentState) -> AgentState:
         state["final_answer"] = "Agent halted due to exceeding maximum allowed reasoning iterations."
         return state
 
-    #
-    if isinstance(state["context"], dict):
-        state["context"].pop("pending_action", None)
-    else:
-        print("WARNING: state['context'] is not a dictionary in reasoning_node. Cannot pop pending_action.")
-        state["context"] = {} # Re-initialize if it's corrupted
+    # Now that context is guaranteed a dict, this is safe
+    state["context"].pop("pending_action", None)
 
     model_name = "mistralai/Mistral-7B-Instruct-v0.2"
     print(f"DEBUG: Loading local model: {model_name}...")
@@ -344,9 +344,9 @@ def reasoning_node(state: AgentState) -> AgentState:
     )
     llm = HuggingFacePipeline(pipeline=pipe)
 
-    # Ensure state.get("tools") returns a list before iterating
+    # Ensure state.get("tools") returns a list before iterating and that items are not None
     tool_descriptions = "\n".join([
-        f"- **{t.name}**: {t.description}" for t in state.get("tools", [])
+        f"- **{t.name}**: {t.description}" for t in state.get("tools", []) if t is not None
     ])
 
     if "vector_store" not in state["context"]:
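With the new `if t is not None` guard, each surviving tool still renders as one markdown bullet. For the PythonExecutionTool from the first hunk, the joined string contains, for example:

```
- **python_execution**: Executes Python code for complex calculations, data manipulation, or logical operations. Always assign the final result to a variable named '_result_value'.
```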
@@ -356,12 +356,11 @@ def reasoning_node(state: AgentState) -> AgentState:
     vector_store = state["context"].get("vector_store")
     if vector_store is None:
         print("ERROR: Vector store is None after creation/retrieval in reasoning_node. Cannot perform similarity search.")
-        # Handle this error more gracefully, e.g., return an error state or raise exception
         state["final_answer"] = "Internal error: Vector store not available."
         return state
 
     # Ensure question is a string for similarity_search
-    query_for_docs = state["question"] if isinstance(state["question"], str) else str(state["question"])
+    query_for_docs = state["question"] if isinstance(state.get("question"), str) else str(state["question"])
     relevant_docs = vector_store.similarity_search(
         query_for_docs,
         k=3
@@ -372,7 +371,7 @@ def reasoning_node(state: AgentState) -> AgentState:
     rag_context += "\n---\n".join([doc.page_content for doc in relevant_docs if doc is not None])
 
 
-    system_prompt_template = (
+    system_prompt_template = (
         "You are an expert problem solver, designed to provide concise and accurate answers. "
         "Your process involves analyzing the question, intelligently selecting and using tools, "
         "and synthesizing information.\n\n"
@@ -416,9 +415,8 @@ def reasoning_node(state: AgentState) -> AgentState:
     )
 
     prompt = ChatPromptTemplate.from_messages([
-        SystemMessage(content=system_prompt_template),
-        *state["history"] # This assumes state["history"] is a list.
-        # The check at the start of the node handles if it's None.
+        SystemMessage(content=system_prompt_template),
+        *state["history"] # This assumes state["history"] is a list. The check at the start of the node handles if it's None.
     ])
 
     formatted_messages = prompt.format_messages(
@@ -430,28 +428,26 @@ def reasoning_node(state: AgentState) -> AgentState:
         current_thoughts=state["current_thoughts"]
     )
 
-    # Filter out any None messages if they somehow appeared
+    # Filter out any None messages if they somehow appeared before tokenization
    filtered_messages = [msg for msg in formatted_messages if msg is not None]
 
    try:
        full_input_string = tokenizer.apply_chat_template(
-            filtered_messages,
+            filtered_messages,
            tokenize=False,
            add_generation_prompt=True
        )
    except Exception as e:
        print(f"WARNING: Failed to apply chat template: {e}. Falling back to simple string join. Model performance may be affected.")
-        # Filter again just in case, before accessing .content
        full_input_string = "\n".join([msg.content for msg in filtered_messages if msg is not None])
 
    def call_with_retry_local(inputs, retries=3):
        for attempt in range(retries):
            try:
                response_text = llm.invoke(inputs)
-                if response_text is None:
+                if response_text is None:
                    raise ValueError("LLM invoke returned None response_text.")
 
-                # Ensure response_text is a string before calling .replace()
                content = response_text.replace(inputs, "").strip() if isinstance(response_text, str) else str(response_text).replace(inputs, "").strip()
 
                print(f"DEBUG: RAW LOCAL LLM Response (Attempt {attempt+1}):\n---\n{content}\n---")
@@ -460,7 +456,6 @@ def reasoning_node(state: AgentState) -> AgentState:
                return AIMessage(content=content)
            except Exception as e:
                print(f"[Retry {attempt+1}/{retries}] Local LLM returned invalid content or an error. Error: {e}. Retrying...")
-                # Safely preview content for debugging
                safe_content_preview = content[:200] if isinstance(content, str) else "Content was not a string or is None."
                print(f"Invalid content (partial): {safe_content_preview}...")
                state["history"].append(AIMessage(content=f"[Parsing Error] The previous LLM output was not valid. Expected format: ```json{{\"Reasoning\": \"...\", \"Action\": \"...\", \"Action Input\": \"...\"}}```. Please ensure your response is ONLY valid JSON and strictly follows the format. Error: {e}"))
@@ -468,10 +463,33 @@ def reasoning_node(state: AgentState) -> AgentState:
        raise RuntimeError("Failed after multiple retries due to local Hugging Face model issues or invalid JSON.")
 
    response = call_with_retry_local(full_input_string)
-    # If response is None, it would have been caught by the ValueError in call_with_retry_local
    content = response.content
 
-
+    if not content.startswith("[Parsing Error]") and not content.startswith("[Local LLM Error]"):
+        state["history"].append(AIMessage(content=content))
+
+    state["reasoning"] += f"\nStep {state['iterations']}: {reasoning}"
+    state["current_thoughts"] = reasoning
+
+    if action.lower() == "final answer":
+        state["final_answer"] = action_input
+        print(f"DEBUG: Final answer set in state: {state['final_answer']}")
+    else:
+        state["context"]["pending_action"] = {
+            "tool": action,
+            "input": action_input
+        }
+        if action and action != "No Action":
+            state["history"].append(AIMessage(content=f"Agent decided to use tool: {action} with input: {action_input}"))
+        elif action == "No Action":
+            state["history"].append(AIMessage(content=f"Agent decided to take 'No Action' but needs to proceed."))
+            if not state.get("final_answer"):
+                state["current_task"] = "Re-evaluate the situation and attempt to find a final answer or a new tool."
+                state["current_thoughts"] = "The previous step resulted in 'No Action'. I need to re-think my next step."
+            state["context"].pop("pending_action", None)
+
+    print(f"DEBUG: Exiting reasoning_node. New history length: {len(state['history'])}")
+    return state
 
 def tool_node(state: AgentState) -> AgentState:
     """
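The added block reads `reasoning`, `action`, and `action_input`, but no hunk in this diff shows them being assigned; they presumably come from parsing `content` in code outside the changed regions. A sketch of what that step would have to do, based on the JSON format demanded in the [Parsing Error] message above (hypothetical helper name and behavior):

```python
import json
import re

def parse_llm_json(content: str):
    """Extract Reasoning / Action / Action Input from the model's JSON reply."""
    match = re.search(r"\{.*\}", content, re.DOTALL)  # tolerate prose around the JSON
    if match is None:
        return "", "No Action", ""
    try:
        data = json.loads(match.group(0))
    except json.JSONDecodeError:
        return "", "No Action", ""
    return (str(data.get("Reasoning", "")),
            str(data.get("Action", "No Action")),
            str(data.get("Action Input", "")))

# reasoning, action, action_input = parse_llm_json(content)
```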
@@ -480,29 +498,25 @@ def tool_node(state: AgentState) -> AgentState:
     # --- Defensive checks at the start of the node ---
     if state is None:
         raise ValueError("tool_node received a None state object.")
-    if state.get("history") is None:
-        print("WARNING: 'history' is None on entry to tool_node. Re-initializing to empty list.")
-        state["history"] = []
-    if state.get("context") is None:
-        print("WARNING: 'context' is None on entry to tool_node. Re-initializing to empty dict.")
-        state["context"] = {}
 
+    # Ensure context is a dictionary
+    if not isinstance(state.get("context"), dict):
+        print("WARNING: state['context'] is not a dictionary on entry to tool_node. Re-initializing to empty dict.")
+        state["context"] = {}
+
+    # Ensure history is a list
+    if not isinstance(state.get("history"), list):
+        print("WARNING: state['history'] is not a list on entry to tool_node. Re-initializing to empty list.")
+        state["history"] = []
+
     print(f"DEBUG: Entering tool_node. Iteration: {state['iterations']}")
 
-    # Safely access tool_call_dict.
-    tool_call_dict = None
-    if isinstance(state["context"], dict):
-        tool_call_dict = state["context"].pop("pending_action", None)
-    else:
-        print("WARNING: state['context'] is not a dictionary in tool_node. Cannot pop pending_action.")
-        state["context"] = {} # Re-initialize if it's corrupted
+    # Safely access tool_call_dict. Context is guaranteed to be a dict here.
+    tool_call_dict = state["context"].pop("pending_action", None)
 
     if tool_call_dict is None:
-        error_message = "[Tool Error] No pending_action found in context. This indicates an issue with graph flow or a previous error."
+        error_message = "[Tool Error] No pending_action found in context. This indicates an issue with graph flow or a previous error."
         print(f"ERROR: {error_message}")
-        # Ensure state["history"] is a list before appending
-        if state.get("history") is None:
-            state["history"] = []
         state["history"].append(AIMessage(content=error_message))
         state["current_task"] = "Re-evaluate the situation; previous tool selection failed or was missing."
         state["current_thoughts"] = "No tool action was found. I need to re-think my next step."
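Together with the reasoning_node hunk above, this defines the handshake between the two nodes: a small dict queued under `context["pending_action"]`. For illustration (the input value here is made up):

```python
state = {"context": {}}  # stand-in for the AgentState instance

# Shape produced by reasoning_node and consumed by tool_node:
state["context"]["pending_action"] = {
    "tool": "python_execution",       # must match a BaseTool.name
    "input": "_result_value = 2 + 2"  # made-up input for that tool
}
```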
@@ -511,18 +525,16 @@ def tool_node(state: AgentState) -> AgentState:
     tool_name = tool_call_dict.get("tool")
     tool_input = tool_call_dict.get("input")
 
-    if not tool_name or tool_input is None:
+    if not tool_name or tool_input is None:
         error_message = f"[Tool Error] Invalid action request from LLM: Tool name '{tool_name}' or input '{tool_input}' was empty or None. LLM needs to provide valid 'Action' and 'Action Input'."
         print(f"ERROR: {error_message}")
-        if state.get("history") is None:
-            state["history"] = []
         state["history"].append(AIMessage(content=error_message))
-        state["context"].pop("pending_action", None)
+        state["context"].pop("pending_action", None)
         return state
 
     available_tools = state.get("tools", [])
-    tool_fn = next((t for t in available_tools if t.name == tool_name), None)
-
+    # Filter out any None tools before iterating
+    tool_fn = next((t for t in available_tools if t is not None and t.name == tool_name), None)
 
     tool_output = ""
 
@@ -541,24 +553,20 @@ def tool_node(state: AgentState) -> AgentState:
         tool_output = f"[Tool Error] An error occurred while running '{tool_name}': {str(e)}"
         print(f"ERROR: {tool_output}")
 
-    # Ensure state["history"] is a list before appending
-    if state.get("history") is None:
-        state["history"] = []
     state["history"].append(AIMessage(content=tool_output))
 
     print(f"DEBUG: Exiting tool_node. Tool output added to history. New history length: {len(state['history'])}")
     return state
 
-
 # ====== Agent Graph ======
-def create_agent_workflow(tools: List[BaseTool]):
+def create_agent_workflow(tools: List[BaseTool]):
     workflow = StateGraph(AgentState)
-
+
     workflow.add_node("reason", reasoning_node)
     workflow.add_node("action", tool_node)
-
+
     workflow.set_entry_point("reason")
-
+
     workflow.add_conditional_edges(
         "reason",
         should_continue,
@@ -568,13 +576,12 @@ def create_agent_workflow(tools: List[BaseTool]): # Use BaseTool for consistency
             "end": END
         }
     )
-
+
     workflow.add_edge("action", "reason")
-
+
     app = workflow.compile()
     return app
 
-
 # ====== Agent Interface ======
 class BasicAgent:
     def __init__(self):
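`should_continue` is referenced by the conditional edge but never shown in this diff; only the `"end": END` branch of its mapping is visible. A sketch consistent with how the two nodes use `final_answer` and `pending_action` (the `"continue"` key is an assumption, not the commit's actual implementation):

```python
def should_continue(state: dict) -> str:
    # Route back to the "action" node while a tool call is queued; stop otherwise.
    if state.get("final_answer") is not None:
        return "end"
    if state.get("context", {}).get("pending_action") is not None:
        return "continue"  # assumed to be mapped to the "action" node
    return "end"
```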
@@ -587,15 +594,9 @@ class BasicAgent:
             VideoTranscriptionTool()
         ]
 
-
-        try:
-            self.vector_store = create_vector_store()
-        except Exception as e:
-            print(f"ERROR: Failed to create vector store: {str(e)}")
-            self.vector_store = None
-
+        self.vector_store = create_vector_store()
         self.workflow = create_agent_workflow(self.tools)
-
+
     def __call__(self, question: str) -> str:
         print(f"\n--- Agent received question: {question[:50]}{'...' if len(question) > 50 else ''} ---")
 
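Removing the try/except changes failure behavior: an exception from `create_vector_store()` now propagates out of `BasicAgent.__init__` instead of leaving `self.vector_store = None`. If the old tolerate-and-continue behavior is still wanted, the guard has to move to the caller; a sketch:

```python
# Sketch: callers can reinstate the old fallback behavior themselves.
try:
    agent = BasicAgent()
except Exception as e:
    print(f"ERROR: BasicAgent failed to initialize (vector store creation?): {e}")
    agent = None
```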
@@ -616,6 +617,11 @@ class BasicAgent:
         try:
             final_state = self.workflow.invoke(state, {"recursion_limit": 20})
 
+            # It's highly unlikely final_state would be None if invoke completes,
+            # but this check is harmless and covers an extreme edge case.
+            if final_state is None:
+                return "Agent workflow completed but returned a None state. This is unexpected."
+
             if final_state.get("final_answer") is not None:
                 answer = final_state["final_answer"]
                 print(f"--- Agent returning FINAL ANSWER: {answer} ---")
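The `state` passed to `invoke` is assembled earlier in `__call__`, outside the hunks shown here. From the keys both nodes read, its initial shape is presumably something like this (a reconstruction, not code from the commit):

```python
from langchain_core.messages import HumanMessage

question = "What is 2 + 2?"  # example input
initial_state = {
    "question": question,
    "history": [HumanMessage(content=question)],  # assumed seeding of history
    "context": {},
    "reasoning": "",
    "iterations": 0,
    "current_task": "Understand the question and plan the next step.",
    "current_thoughts": "",
    "final_answer": None,
    "tools": [],  # in __call__ this would be self.tools
}
```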
@@ -637,7 +643,6 @@
 
 
 
-
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
|