Spaces: Runtime error

Fix

agent.py CHANGED
```diff
@@ -17,15 +17,20 @@ from langgraph.prebuilt import tools_condition, ToolNode
 from langchain_huggingface import HuggingFaceEmbeddings
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
-...
+try:
+    from langchain_community.vectorstores import Chroma
+except ImportError:
+    from langchain.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
 from langchain_core.tools import tool
 from langchain_core.language_models.base import BaseLanguageModel
 from langchain.tools.retriever import create_retriever_tool
-...
-from langchain.embeddings import HuggingFaceEmbeddings
-...
+try:
+    from langchain.embeddings import HuggingFaceEmbeddings as LegacyHFEmbeddings
+except ImportError:
+    LegacyHFEmbeddings = HuggingFaceEmbeddings
+from langchain.schema import Document as LegacyDocument
 import json
 import requests
 from typing import List, Dict, Any
```
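All three import changes follow the same pattern: try the current package path first and fall back to the legacy one, so the Space keeps booting across langchain releases (deleted lines whose text the diff view truncated are shown as `...`). If the same chain were factored into a helper it might look like this; `import_first` is a hypothetical name, not part of the commit:

```python
import importlib

def import_first(*candidates):
    """Return the first importable 'module:attribute' from a list of paths."""
    errors = []
    for path in candidates:
        module_name, _, attr = path.partition(":")
        try:
            module = importlib.import_module(module_name)
            return getattr(module, attr) if attr else module
        except (ImportError, AttributeError) as exc:
            errors.append(f"{path}: {exc}")
    raise ImportError("no candidate importable: " + "; ".join(errors))

# Mirrors the fallback chain in the hunk above:
Chroma = import_first(
    "langchain_community.vectorstores:Chroma",
    "langchain.vectorstores:Chroma",
)
```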
```diff
@@ -33,7 +38,7 @@ import re
 import math
 from datetime import datetime
 
-# Custom HuggingFace LLM wrapper
+# Custom HuggingFace LLM wrapper with better error handling
 class SimpleHuggingFaceLLM(BaseLanguageModel):
     def __init__(self, repo_id: str, hf_token: str):
         super().__init__()
```
```diff
@@ -41,6 +46,30 @@ class SimpleHuggingFaceLLM(BaseLanguageModel):
         self.hf_token = hf_token
         self.api_url = f"https://api-inference.huggingface.co/models/{repo_id}"
         self.headers = {"Authorization": f"Bearer {hf_token}"}
+
+        # Test the connection
+        self._test_connection()
+
+    def _test_connection(self):
+        """Test if the model is accessible"""
+        payload = {
+            "inputs": "Hello",
+            "parameters": {
+                "max_new_tokens": 10,
+                "temperature": 0.1,
+                "return_full_text": False
+            }
+        }
+
+        try:
+            response = requests.post(self.api_url, headers=self.headers, json=payload, timeout=30)
+            if response.status_code != 200:
+                print(f"Model {self.repo_id} test failed with status {response.status_code}: {response.text}")
+                raise Exception(f"Model not accessible: {response.status_code}")
+            print(f"Model {self.repo_id} test successful")
+        except Exception as e:
+            print(f"Model {self.repo_id} connection test failed: {e}")
+            raise e
 
     def _generate(self, messages, stop=None, run_manager=None, **kwargs):
         # Convert messages to a single prompt
```
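The new `_test_connection` makes an unreachable model fail at construction time instead of at the first question. The same probe works standalone before deploying; this sketch assumes the token is exported as `HUGGINGFACE_INFERENCE_TOKEN` (the variable the commit checks elsewhere), and note that `_test_connection` reads `self.repo_id`, which is presumably assigned on the `__init__` line just above the visible hunk:

```python
import os
import requests

def probe_model(repo_id: str) -> bool:
    """Send the same minimal payload as _test_connection and report reachability."""
    url = f"https://api-inference.huggingface.co/models/{repo_id}"
    headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACE_INFERENCE_TOKEN']}"}
    payload = {"inputs": "Hello", "parameters": {"max_new_tokens": 10}}
    try:
        resp = requests.post(url, headers=headers, json=payload, timeout=30)
        return resp.status_code == 200
    except requests.RequestException:
        return False

print(probe_model("google/flan-t5-base"))
```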
```diff
@@ -54,25 +83,34 @@ class SimpleHuggingFaceLLM(BaseLanguageModel):
             "parameters": {
                 "max_new_tokens": 512,
                 "temperature": 0.1,
-                "return_full_text": False
+                "return_full_text": False,
+                "do_sample": False
             }
         }
 
         try:
-            response = requests.post(self.api_url, headers=self.headers, json=payload)
+            response = requests.post(self.api_url, headers=self.headers, json=payload, timeout=60)
             if response.status_code == 200:
                 result = response.json()
                 if isinstance(result, list) and len(result) > 0:
                     generated_text = result[0].get('generated_text', '')
+                elif isinstance(result, dict):
+                    generated_text = result.get('generated_text', str(result))
                 else:
                     generated_text = str(result)
 
                 from langchain_core.outputs import LLMResult, Generation
                 return LLMResult(generations=[[Generation(text=generated_text)]])
             else:
-                ...
+                error_msg = f"API Error {response.status_code}: {response.text[:200]}"
+                print(error_msg)
+                from langchain_core.outputs import LLMResult, Generation
+                return LLMResult(generations=[[Generation(text=f"Error: {error_msg}")]])
         except Exception as e:
-            ...
+            error_msg = f"Request failed: {str(e)}"
+            print(error_msg)
+            from langchain_core.outputs import LLMResult, Generation
+            return LLMResult(generations=[[Generation(text=error_msg)]])
 
     def invoke(self, input, config=None, **kwargs):
         if isinstance(input, list):
```
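With this change both the success path and the error paths return a well-formed `LLMResult` instead of raising, so the graph always receives text. The shape LangChain expects is one list of `Generation` objects per input prompt:

```python
from langchain_core.outputs import Generation, LLMResult

# One outer list entry per input prompt, one inner entry per completion.
result = LLMResult(generations=[[Generation(text="Paris")]])
print(result.generations[0][0].text)  # -> Paris
```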
```diff
@@ -87,6 +125,11 @@ class SimpleHuggingFaceLLM(BaseLanguageModel):
     @property
     def _llm_type(self):
         return "huggingface_custom"
+
+    def _call(self, prompt: str, stop=None, run_manager=None, **kwargs):
+        """Legacy method for compatibility"""
+        result = self._generate(prompt)
+        return result.generations[0][0].text
 
 # ---- Enhanced Tools ----
 
```
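Note that the `_call` shim hands `_generate` a plain string, so the prompt-building step there has to tolerate both message lists and raw strings. A quick smoke test of the wrapper outside the graph (a hypothetical check, assuming a valid token in the environment):

```python
import os
from agent import SimpleHuggingFaceLLM  # the class patched above

llm = SimpleHuggingFaceLLM(
    repo_id="google/flan-t5-base",
    hf_token=os.environ["HUGGINGFACE_INFERENCE_TOKEN"],
)
print(llm.invoke("Q: What is 2 + 2? A:"))
```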
```diff
@@ -217,17 +260,35 @@ def simple_calculation(expression: str) -> str:
     except Exception as e:
         return f"Calculation error: {str(e)}"
 
-# ---- Embedding & Vector Store Setup ----
+# ---- Embedding & Vector Store Setup with better error handling ----
 def setup_vector_store():
     try:
-        ...
+        # Try different embedding models
+        embedding_models = [
+            "sentence-transformers/all-MiniLM-L6-v2",
+            "sentence-transformers/all-mpnet-base-v2"
+        ]
+
+        embeddings = None
+        for model_name in embedding_models:
+            try:
+                embeddings = HuggingFaceEmbeddings(model_name=model_name)
+                print(f"Successfully loaded embeddings: {model_name}")
+                break
+            except Exception as e:
+                print(f"Failed to load embeddings {model_name}: {e}")
+                continue
+
+        if embeddings is None:
+            print("Could not load any embedding model, skipping vector store setup")
+            return None
 
         # Check if metadata.jsonl exists and load it
         if os.path.exists('metadata.jsonl'):
             json_QA = []
             with open('metadata.jsonl', 'r') as jsonl_file:
                 for line in jsonl_file:
-                    if line.strip():
+                    if line.strip():
                         try:
                             json_QA.append(json.loads(line))
                         except:
```
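Each entry in the fallback list is a sentence-transformers model that maps text to a fixed-size vector. A minimal check that the first one loads (`all-MiniLM-L6-v2` produces 384-dimensional embeddings):

```python
from langchain_huggingface import HuggingFaceEmbeddings

emb = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vec = emb.embed_query("What is the capital of France?")
print(len(vec))  # 384 for this model
```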
```diff
@@ -244,29 +305,37 @@ def setup_vector_store():
                 documents.append(doc)
 
             if documents:
-                ...
+                try:
+                    vector_store = Chroma.from_documents(
+                        documents=documents,
+                        embedding=embeddings,
+                        persist_directory="./chroma_db",
+                        collection_name="my_collection"
+                    )
+                    vector_store.persist()
+                    print(f"Vector store created with {len(documents)} documents")
+                    return vector_store
+                except Exception as e:
+                    print(f"Error creating vector store with documents: {e}")
 
         # Create empty vector store if no data
-        ...
+        try:
+            vector_store = Chroma(
+                embedding_function=embeddings,
+                persist_directory="./chroma_db",
+                collection_name="my_collection"
+            )
+            print("Empty vector store created")
+            return vector_store
+        except Exception as e:
+            print(f"Error creating empty vector store: {e}")
+            return None
 
     except Exception as e:
         print(f"Vector store setup error: {e}")
         return None
 
+# Try to setup vector store, but don't fail if it doesn't work
 vector_store = setup_vector_store()
 
 @tool
```
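The same pattern in a self-contained sketch, using an in-memory collection (no `persist_directory`) so it runs anywhere `chromadb` is installed; the documents here are invented for the example. Depending on the pinned chromadb version, the `persist()` call above may be deprecated or a no-op, since recent releases persist automatically:

```python
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_huggingface import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
docs = [
    Document(page_content="Question: What is 2 + 2?\nFinal answer: 4", metadata={"source": "demo"}),
    Document(page_content="Question: Capital of France?\nFinal answer: Paris", metadata={"source": "demo"}),
]
store = Chroma.from_documents(documents=docs, embedding=embeddings, collection_name="demo")
for hit in store.similarity_search("what is two plus two", k=1):
    print(hit.page_content)
```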
```diff
@@ -325,21 +394,26 @@ tools = [
     wiki_search, web_search, similar_question_search
 ]
 
-# ---- Graph Definition ----
+# ---- Graph Definition with better error handling ----
 def build_graph(provider: str = "huggingface"):
     """Build the agent graph with custom HuggingFace integration"""
 
     if provider == "huggingface":
-        ...
+        if not hf_token:
+            raise ValueError("HUGGINGFACE_INFERENCE_TOKEN is required but not found in environment variables")
+
+        # Use custom HuggingFace LLM with better model selection
         models_to_try = [
+            "microsoft/DialoGPT-medium",
             "google/flan-t5-base",
-            "...
-            "...
+            "facebook/blenderbot-400M-distill",
+            "microsoft/DialoGPT-small"
         ]
 
         llm = None
         for model_id in models_to_try:
             try:
+                print(f"Trying to initialize model: {model_id}")
                 llm = SimpleHuggingFaceLLM(repo_id=model_id, hf_token=hf_token)
                 print(f"Successfully initialized model: {model_id}")
                 break
```
```diff
@@ -348,7 +422,7 @@ def build_graph(provider: str = "huggingface"):
                 continue
 
         if llm is None:
-            raise ValueError("Failed to initialize any HuggingFace model")
+            raise ValueError("Failed to initialize any HuggingFace model. Please check your HUGGINGFACE_INFERENCE_TOKEN and internet connection.")
     else:
         raise ValueError("Only 'huggingface' provider is supported")
 
```
```diff
@@ -388,7 +462,7 @@ def build_graph(provider: str = "huggingface"):
 
         return {"messages": context_messages + messages}
 
-    # Build simplified graph
+    # Build simplified graph
     builder = StateGraph(MessagesState)
     builder.add_node("retriever", retriever)
     builder.add_node("assistant", assistant)
```
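The hunk shows only the node registrations; the edges and the `compile()` call sit outside it. A self-contained sketch of how such a retriever-then-assistant graph is typically wired, with stub nodes standing in for the commit's real ones (an assumption, not the commit's exact code):

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langgraph.graph import StateGraph, MessagesState, START

def retriever(state: MessagesState):
    # Stub: the real node prepends context retrieved from the vector store.
    return {"messages": [SystemMessage(content="Reference Q/A: ...")]}

def assistant(state: MessagesState):
    # Stub: the real node calls SimpleHuggingFaceLLM on the running messages.
    return {"messages": [AIMessage(content="answer")]}

builder = StateGraph(MessagesState)
builder.add_node("retriever", retriever)
builder.add_node("assistant", assistant)
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
graph = builder.compile()

out = graph.invoke({"messages": [HumanMessage(content="hi")]})
print(out["messages"][-1].content)  # -> answer
```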
app.py CHANGED
```diff
@@ -1,3 +1,4 @@
+
 import os
 import gradio as gr
 import requests
```
```diff
@@ -7,19 +8,23 @@ from agent import build_graph
 from langchain_core.messages import HumanMessage
 import time
 
-# (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # --- Improved Agent Definition ---
 class BasicAgent:
     def __init__(self):
-        print("BasicAgent initialized.")
+        print("Initializing BasicAgent...")
         try:
+            # Add more verbose logging
+            print("Building graph...")
             self.graph = build_graph()
             print("Graph built successfully.")
         except Exception as e:
             print(f"Error building graph: {e}")
+            print(f"Error type: {type(e).__name__}")
+            import traceback
+            traceback.print_exc()
             raise e
 
     def __call__(self, question: str) -> str:
```
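Printing the exception type and the full traceback is what makes Space startup failures diagnosable from the logs. The pattern in isolation, with a hypothetical `risky_init` standing in for `build_graph`:

```python
import traceback

def risky_init():
    raise RuntimeError("model not accessible")  # stand-in failure

try:
    risky_init()
except Exception as e:
    print(f"Error building graph: {e} ({type(e).__name__})")
    traceback.print_exc()
    raise  # fail loudly instead of leaving a half-built agent
```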
```diff
@@ -28,6 +33,8 @@ class BasicAgent:
         try:
             # Clean the question
             question = question.strip()
+            if not question:
+                return "Empty question received"
 
             # Wrap the question in a HumanMessage
             messages = [HumanMessage(content=question)]
```
```diff
@@ -36,6 +43,7 @@ class BasicAgent:
             max_retries = 3
             for attempt in range(max_retries):
                 try:
+                    print(f"Attempt {attempt + 1} to process question...")
                     result = self.graph.invoke({"messages": messages})
 
                     if 'messages' in result and result['messages']:
```
```diff
@@ -50,21 +58,36 @@ class BasicAgent:
                             # Additional cleanup
                             answer = answer.replace("Assistant: ", "").strip()
 
+                            # Handle empty or error responses
+                            if not answer or "Error:" in answer or "error" in answer.lower():
+                                if attempt < max_retries - 1:
+                                    print(f"Got error response, retrying: {answer[:100]}")
+                                    time.sleep(2)
+                                    continue
+                                else:
+                                    return "Unable to generate answer"
+
                             print(f"Agent answer (first 100 chars): {answer[:100]}...")
                             return answer
                         else:
                             return str(answer)
                     else:
+                        print("No messages in result")
+                        if attempt < max_retries - 1:
+                            time.sleep(2)
+                            continue
                         return "No response generated"
 
                 except Exception as e:
                     print(f"Attempt {attempt + 1} failed: {e}")
                     if attempt == max_retries - 1:
                         return f"Error processing question: {str(e)}"
-                    time.sleep(...)
+                    time.sleep(2)  # Brief pause before retry
 
         except Exception as e:
             print(f"Error in agent call: {e}")
+            import traceback
+            traceback.print_exc()
             return f"Agent error: {str(e)}"
```
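The retry loop now treats three cases the same way: an exception, an empty or error-looking answer, and a result with no messages all lead to a two-second pause and another attempt. Its control flow reduces to a small helper (hypothetical, not in the commit):

```python
import time

def with_retries(fn, max_retries=3, pause=2.0):
    """Run fn(), retrying on any exception; re-raise after the last attempt."""
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            if attempt == max_retries - 1:
                raise
            time.sleep(pause)

# e.g. with_retries(lambda: graph.invoke({"messages": messages}))
```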
```diff
@@ -94,6 +117,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         print("Agent initialized successfully.")
     except Exception as e:
         print(f"Error instantiating agent: {e}")
+        import traceback
+        traceback.print_exc()
         return f"Error initializing agent: {e}", None
 
     # In the case of an app running as a Hugging Face space, this link points toward your codebase
```
```diff
@@ -148,20 +173,25 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             if not submitted_answer or submitted_answer.strip() == "":
                 submitted_answer = "No answer generated"
 
+            # Clean up the answer further
+            submitted_answer = str(submitted_answer).strip()
+            if submitted_answer.startswith("Error:") or submitted_answer.startswith("Agent error:"):
+                submitted_answer = "Unable to process question"
+
             answers_payload.append({
                 "task_id": task_id,
-                "submitted_answer": ...
+                "submitted_answer": submitted_answer
             })
 
             results_log.append({
                 "Task ID": task_id,
                 "Question": question_text[:200] + "..." if len(question_text) > 200 else question_text,
-                "Submitted Answer": ...
+                "Submitted Answer": submitted_answer
             })
 
         except Exception as e:
             print(f"Error running agent on task {task_id}: {e}")
-            error_answer = ...
+            error_answer = "Processing error occurred"
             answers_payload.append({
                 "task_id": task_id,
                 "submitted_answer": error_answer
```
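The same cleanup, factored into a single helper (hypothetical; the commit inlines it before building the payload):

```python
def sanitize_answer(raw) -> str:
    """Normalize an agent answer before submission, mirroring the hunk above."""
    answer = str(raw).strip() if raw is not None else ""
    if not answer:
        return "No answer generated"
    if answer.startswith(("Error:", "Agent error:")):
        return "Unable to process question"
    return answer

assert sanitize_answer("  Paris ") == "Paris"
assert sanitize_answer("Agent error: boom") == "Unable to process question"
```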