simplify
agent.py
CHANGED
@@ -21,13 +21,6 @@ from tools import (
 
 # ─────────────────────────── Configuration ───────────────────────────────
 MAX_TOOL_CALLS = 5
-AGENT_TIMEOUT = 300  # 5 minutes timeout for agent execution
-
-class TimeoutError(Exception):
-    pass
-
-def timeout_handler(signum, frame):
-    raise TimeoutError("Agent execution timed out")
 
 # ─────────────────────────── Helper utilities ──────────────────────────────
 
@@ -41,13 +34,8 @@ def timeout_handler(signum, frame):
 # ─────────────────────────── Graph wiring ───────────────────────────────
 
 def build_graph():
-    """Build and return a create_react_agent
-    llm = ChatOpenAI(
-        model_name="gpt-4o-mini",
-        temperature=0.1,  # Lower temperature for more consistent responses
-        max_tokens=2000,  # Ensure reasonable response length
-        timeout=60  # 1 minute timeout for LLM calls
-    )
+    """Build and return a create_react_agent."""
+    llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0.3)
 
     llm_tools = [
         wikipedia_search_tool,
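
For orientation, a minimal sketch of what the simplified build_graph() plausibly looks like as a whole after this commit. Only the top of the function appears in the hunks above; the closing create_react_agent call and the full tool list are assumptions, not part of the diff.

from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from tools import wikipedia_search_tool, arxiv_search_tool  # further tools omitted in this sketch

def build_graph():
    """Build and return a create_react_agent."""
    llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0.3)

    llm_tools = [
        wikipedia_search_tool,  # only this entry is visible in the hunk
        arxiv_search_tool,      # assumed; the rest of the list is not shown
    ]

    # Assumed ending: langgraph's prebuilt ReAct agent compiled over the LLM and tools.
    return create_react_agent(llm, llm_tools)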
app.py
CHANGED
@@ -16,38 +16,9 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 SYSTEM_PROMPT = """
 You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
 
-CRITICAL REQUIREMENT - ALWAYS PROVIDE AN ANSWER:
-- You MUST always provide a FINAL ANSWER, no matter what happens
-- If tools fail, provide the best answer you can based on your knowledge
-- If information is incomplete, make a reasonable inference or educated guess
-- If you cannot find specific information, provide a general or approximate answer
-- Never say "I cannot answer" or "I don't know" - always attempt to provide some form of answer
-- Even if uncertain, provide your best estimate and acknowledge the uncertainty if needed
-
 IMPORTANT: When using tools that require file access (such as audio_transcriber_tool, excel_tool, analyze_code_tool, or image_tool), ALWAYS use the task_id parameter only. Do NOT use any file names mentioned by the user - ignore them completely and only pass the task_id.
 
-
-- If wikipedia_search_tool fails or returns insufficient/irrelevant results, try these fallback strategies:
-  1. Try wikipedia_search_tool again with a broader, more general query (remove specific terms, use synonyms)
-  2. If Wikipedia still doesn't help, try arxiv_search_tool for academic/research topics
-  3. You can use multiple search attempts with different keywords to find better information
-- Always evaluate if the search results are relevant and sufficient before proceeding to your final answer
-- IMPORTANT: When you see [END_OF_SEARCH] in tool results, this means the search is complete and you have all available information
-- Do NOT perform additional searches after seeing [END_OF_SEARCH] - immediately proceed to analyze the provided information and give your final answer
-- The [END_OF_SEARCH] marker indicates you should stop searching and work with what you have
-
-RECURSION LIMIT HANDLING:
-- You have a maximum of 8 steps to complete your task
-- If you reach the recursion limit, you MUST provide a final answer based on whatever information you have gathered so far
-- Do NOT say you cannot answer due to recursion limits - always provide the best answer possible with available information
-- If you have partial information, use it to make a reasonable inference or educated guess
-- Better to provide an approximate answer than no answer at all
-
-ERROR HANDLING:
-- If any tool fails, acknowledge the failure but continue with your analysis
-- Use your existing knowledge to compensate for failed tool calls
-- Always end with a FINAL ANSWER regardless of tool failures or errors
-- Frame uncertain answers appropriately but still provide them
+You MUST always provide a FINAL ANSWER. If you cannot find complete information, provide your best estimate based on available information.
 """
 
 
@@ -56,115 +27,42 @@ class BasicAgent:
         print("BasicAgent initialized.")
         self.graph = build_graph()
 
-    def extract_final_answer(self, content: str) -> str:
-        """Extract final answer from content with multiple fallback strategies."""
-        if not content:
-            return "No content generated"
-
-        # Strategy 1: Look for FINAL ANSWER: pattern (case insensitive)
-        final_answer_patterns = [
-            r'FINAL ANSWER:\s*(.+?)(?:\n|$)',
-            r'Final Answer:\s*(.+?)(?:\n|$)',
-            r'final answer:\s*(.+?)(?:\n|$)',
-            r'Answer:\s*(.+?)(?:\n|$)',
-            r'ANSWER:\s*(.+?)(?:\n|$)'
-        ]
-
-        for pattern in final_answer_patterns:
-            match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
-            if match:
-                answer = match.group(1).strip()
-                if answer:
-                    return answer
-
-        # Strategy 2: Look for the last meaningful sentence/paragraph
-        # Split by sentences and take the last non-empty one
-        sentences = [s.strip() for s in content.split('.') if s.strip()]
-        if sentences:
-            last_sentence = sentences[-1]
-            # If it's too long, truncate it
-            if len(last_sentence) > 200:
-                last_sentence = last_sentence[:200] + "..."
-            return last_sentence
-
-        # Strategy 3: Take the last line that's not empty
-        lines = [line.strip() for line in content.split('\n') if line.strip()]
-        if lines:
-            return lines[-1]
-
-        # Strategy 4: Return truncated content as fallback
-        return content[:200] + "..." if len(content) > 200 else content
-
     def __call__(self, question: str, task_id: Optional[str] = None) -> str:
         """Run the agent and return whatever FINAL_ANSWER the graph produces."""
         print(f"Agent received question: {question}")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            signal.alarm(180)  # 3 minute timeout
-
-            try:
-                out_state = self.graph.invoke(init_state, {"recursion_limit": 8})
-            finally:
-                signal.alarm(0)  # Clear the alarm
-
-        except TimeoutError:
-            print("Agent execution timed out")
-            return f"I need to provide a quick answer for '{question}' due to time constraints. Based on general knowledge, this question would likely require research into the specific topic. Please consider consulting reliable sources for the most accurate information."
-        except Exception as e:
-            print(f"Graph execution error: {e}")
-            # Fallback: try with simpler approach
-            return f"Based on the question '{question}', I cannot provide a complete analysis due to technical limitations. However, I would recommend researching this topic further for a comprehensive answer."
-
-        # Extract the final answer from the last message
-        if out_state and "messages" in out_state and out_state["messages"]:
-            last_message = out_state["messages"][-1]
+        # Create system prompt with task_id included
+        system_prompt_with_task = SYSTEM_PROMPT
+        if task_id:
+            system_prompt_with_task += f"\n\nIMPORTANT: Your current task_id is: {task_id}. When using any tools that require a task_id parameter (audio_transcriber_tool, excel_tool, analyze_code_tool, image_tool), use this exact task_id: {task_id}"
+
+        # Initialize the state properly with all required fields
+        init_state = {
+            "messages": [
+                SystemMessage(content=system_prompt_with_task),
+                HumanMessage(content=question)
+            ]
+        }
+
+        # Run the agent
+        out_state = self.graph.invoke(init_state, {"recursion_limit": 8})
+
+        # Extract the final answer from the last message
+        if out_state and "messages" in out_state:
+            last_message = out_state["messages"][-1]
+            if hasattr(last_message, 'content'):
+                content = last_message.content
+                print("content: ", content)
+                print("\n\n\n\n")
 
-
-
-
-                    print("\n\n\n\n")
-
-                    final_answer = self.extract_final_answer(content)
+                # Look for FINAL ANSWER: pattern
+                if "FINAL ANSWER:" in content:
+                    final_answer = content.split("FINAL ANSWER:")[-1].strip()
                     return final_answer
                 else:
-
-
-
-                    if message.type == 'ai':  # Only look at AI messages
-                        final_answer = self.extract_final_answer(message.content)
-                        if final_answer and final_answer != "No content generated":
-                            return final_answer
-
-        # If we can't extract from messages, provide a fallback response
-        print("No valid content found in messages, providing fallback response")
-        return f"I was unable to provide a complete answer to the question: '{question}'. This may require additional research or clarification."
-
-        except Exception as e:
-            print(f"Unexpected error in agent execution: {e}")
-            # Always provide some form of answer, even if there's an error
-            return f"I encountered an error while processing the question: '{question}'. The issue appears to be technical in nature. Please try rephrasing the question or contact support if the problem persists."
+                    return content
+
+        return "No answer generated."
 
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
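
The new __call__ replaces the removed regex-based extract_final_answer helper with a plain string split on the FINAL ANSWER marker. A quick standalone illustration of that behavior (the example content below is made up, not from the repo):

# Splitting on the last "FINAL ANSWER:" marker keeps only the text after it,
# which is what the simplified extraction in __call__ relies on.
content = "Thought: the capital of France is well known.\nFINAL ANSWER: Paris"
if "FINAL ANSWER:" in content:
    final_answer = content.split("FINAL ANSWER:")[-1].strip()
    print(final_answer)  # prints: Paris
else:
    print(content)  # no marker: fall back to the raw content, as in the new code

One consequence of the simplification is that everything after the marker, including any trailing commentary, becomes the answer; the removed helper instead tried several fallback patterns before giving up.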
tools.py
CHANGED
@@ -14,22 +14,7 @@ from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 
-
-
-def safe_tool_execution(tool_func):
-    """Decorator to ensure tools always return a useful response."""
-    @functools.wraps(tool_func)
-    def wrapper(*args, **kwargs):
-        try:
-            result = tool_func(*args, **kwargs)
-            if result and result.strip():
-                return result
-            else:
-                return f"Tool {tool_func.__name__} completed but returned no content. Please try a different approach or use general knowledge."
-        except Exception as e:
-            print(f"Error in {tool_func.__name__}: {e}")
-            return f"Tool {tool_func.__name__} encountered an error: {str(e)}. Please continue with available information or try an alternative approach."
-    return wrapper
+# Removed complex safety wrapper - keeping things simple
 
 def _download_file_for_task(task_id: str, ext: str) -> str:
     """
@@ -58,7 +43,6 @@ def _download_file_for_task(task_id: str, ext: str) -> str:
     return ""
 
 @tool
-@safe_tool_execution
 def image_tool(task_id: str) -> str:
     """
     Expects: task_id (str) – a valid image task ID.
@@ -125,7 +109,6 @@ def image_tool(task_id: str) -> str:
 
 
 @tool
-@safe_tool_execution
 def excel_tool(task_id: str) -> str:
     """
     Downloads <task_id>.xlsx (if any) and returns a stringified list of
@@ -156,7 +139,6 @@ def excel_tool(task_id: str) -> str:
 
 import openai
 @tool
-@safe_tool_execution
 def audio_transcriber_tool(task_id: str) -> str:
     """
     LangGraph tool for transcribing audio via OpenAI's Whisper API.
@@ -204,7 +186,6 @@ import re
 import requests
 
 @tool
-@safe_tool_execution
 def wikipedia_search_tool(wiki_query: str) -> str:
     """
     Searches Wikipedia for the given query and returns the first 5 pages.
@@ -269,7 +250,6 @@ def wikipedia_search_tool(wiki_query: str) -> str:
         return error_msg
 
 @tool
-@safe_tool_execution
 def arxiv_search_tool(arxiv_query: str) -> str:
     """
     Searches Arxiv for the given query and returns the first 5 pages.
@@ -339,7 +319,6 @@ from langchain.schema import SystemMessage, HumanMessage
 LLM = ChatOpenAI(model_name="gpt-4.1-mini", temperature=0.2)
 
 @tool
-@safe_tool_execution
 def analyze_code_tool(task_id: str) -> str:
     """
     Either task_id OR (file + task_id)
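
With safe_tool_execution removed, each tool is registered with the bare @tool decorator only. A minimal sketch of the resulting pattern (the import path for @tool and the elided body are assumptions; only the decorator, signature, and docstring appear in the hunks):

from langchain_core.tools import tool  # assumed import path for the @tool decorator

@tool
def wikipedia_search_tool(wiki_query: str) -> str:
    """
    Searches Wikipedia for the given query and returns the first 5 pages.
    """
    ...  # body unchanged by this commit and not shown in the hunks

Any error handling now lives in the individual tool bodies rather than in a shared wrapper, so failures propagate to the agent as-is.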