Upload app.py
app.py
CHANGED
@@ -23,13 +23,104 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # --- Basic Agent Definition ---
 class BasicAgent:
+    """A LlamaIndex-based agent."""
     def __init__(self):
         print("BasicAgent initialized.")
+        try:
+            # Initialize the core components
+            self.llm = self._initialize_llm()
+
+            # Import get_tools from agent.py here to avoid circular imports
+            from agent import get_tools
+            self.tools = get_tools()
+
+            self.memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
+            # Build the agent
+            self.agent = self._build_agent()
+            print("Agent setup complete.")
+        except Exception as e:
+            print(f"Warning: Error during agent initialization: {e}")
+            # Continue despite error - we'll handle this in the __call__ method
+
+    def _initialize_llm(self) -> LLM:
+        """Initialize the LLM based on configuration."""
+        provider = os.getenv("DEFAULT_LLM_PROVIDER", "gemini").lower()
+
+        if provider == "gemini":
+            api_key = os.getenv("GOOGLE_API_KEY")
+            if not api_key:
+                raise ValueError("GOOGLE_API_KEY not found in environment variables")
+
+            return Gemini(
+                model_name="models/gemini-1.5-flash",
+                api_key=api_key,
+                temperature=0.1,
+                top_p=0.95,
+                max_tokens=1024,
+            )
+
+        elif provider == "huggingface":
+            api_key = os.getenv("HUGGINGFACE_API_KEY")
+            if not api_key:
+                raise ValueError("HUGGINGFACE_API_KEY not found in environment variables")
+
+            return HuggingFaceInferenceAPI(
+                model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+                api_key=api_key,
+                temperature=0.1,
+                max_tokens=1024,
+            )
+        else:
+            raise ValueError(f"Unsupported LLM provider: {provider}")
+
+    def _build_agent(self) -> ReActAgent:
+        """Build and return the agent."""
+        # Load system prompt from file and append output format requirements
+        try:
+            with open("system_prompt.txt", "r", encoding="utf-8") as f:
+                system_prompt = f.read()
+            # Append output format to system prompt
+            system_prompt = f"{system_prompt}\n\nIMPORTANT OUTPUT FORMAT:\n{OUTPUT_FORMAT}"
+        except Exception as e:
+            print(f"Error loading system prompt: {e}")
+            system_prompt = f"You are an intelligent agent designed to answer a wide variety of questions.\n\nIMPORTANT OUTPUT FORMAT:\n{OUTPUT_FORMAT}"
+
+        return ReActAgent.from_tools(
+            tools=self.tools,
+            llm=self.llm,
+            memory=self.memory,
+            system_prompt=system_prompt,
+            verbose=True,
+        )
+
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
+
+        try:
+            # Check if agent was properly initialized
+            if not hasattr(self, 'agent') or self.agent is None:
+                # Fallback to a simple response if agent initialization failed
+                return "I'm unable to process your request due to initialization errors."
+
+            # Process the question
+            response = self.agent.query(question)
+            answer_text = str(response)
+
+            # Extract the FINAL ANSWER part if it exists
+            if "FINAL ANSWER:" in answer_text:
+                reasoning_trace = answer_text.split("FINAL ANSWER:")[0].strip()
+                model_answer = answer_text.split("FINAL ANSWER:")[1].strip()
+
+                print(f"Agent generated answer: {model_answer[:50]}..." if len(model_answer) > 50 else f"Agent generated answer: {model_answer}")
+                return model_answer  # Return just the answer part
+            else:
+                # If no FINAL ANSWER pattern, return the whole response
+                print("No 'FINAL ANSWER' found in response. Returning full response.")
+                return answer_text
+
+        except Exception as e:
+            print(f"Error generating answer: {e}")
+            return f"I encountered an error while answering your question: {str(e)}"
 
 def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
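Note: the new methods reference names whose definitions sit above this hunk and are not part of the diff: os, LLM, Gemini, HuggingFaceInferenceAPI, ChatMemoryBuffer, ReActAgent, and the OUTPUT_FORMAT constant. A plausible import block, assuming the post-0.10 llama-index package split (the HuggingFaceInferenceAPI path differs between the llama-index-llms-huggingface and llama-index-llms-huggingface-api packages, so this is an editorial guess):

import os
from llama_index.core.llms import LLM
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.agent import ReActAgent
from llama_index.llms.gemini import Gemini
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI  # or llama_index.llms.huggingface, depending on version

# OUTPUT_FORMAT is defined elsewhere in app.py; its exact wording is not shown
# in this diff, but __call__ expects the model to emit a "FINAL ANSWER:" marker.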
@@ -95,15 +186,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            # Get agent response
-            agent_response_json = agent(question_text)
-            agent_response = json.loads(agent_response_json)
-
-            model_answer = agent_response.get("model_answer", "")
-            reasoning_trace = agent_response.get("reasoning_trace", "")
-
-            # Format for submission payload
-            submitted_answer = model_answer
+            # Get agent response - now it's a direct string
+            submitted_answer = agent(question_text)
 
             # Add to answers payload
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
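The substantive change in this hunk is the agent's return contract: the old code expected __call__ to return a JSON string and parsed model_answer and reasoning_trace out of it; the new code treats the return value as the answer itself. A minimal, self-contained illustration of the two contracts (the strings are illustrative only):

import json

# Old contract: the agent returned a JSON document that had to be parsed.
old_style = '{"model_answer": "Paris", "reasoning_trace": "capital of France"}'
assert json.loads(old_style)["model_answer"] == "Paris"

# New contract: the agent returns the bare answer string. Feeding it to
# json.loads would now fail, which is why the parsing step was removed.
try:
    json.loads("Paris")
except json.JSONDecodeError:
    pass  # expected: a plain answer is not a JSON document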
@@ -112,15 +196,14 @@
             results_log.append({
                 "Task ID": task_id,
                 "Question": question_text,
-                "Submitted Answer": submitted_answer
-                "Reasoning": reasoning_trace[:100] + "..." if len(reasoning_trace) > 100 else reasoning_trace
+                "Submitted Answer": submitted_answer
             })
 
-            # Add to JSONL output
+            # Add to JSONL output - save both the answer and reasoning
             jsonl_output.append({
                 "task_id": task_id,
-                "model_answer": model_answer,
-                "reasoning_trace": reasoning_trace
+                "model_answer": submitted_answer,
+                "reasoning_trace": ""  # No separate reasoning trace now
             })
 
         except Exception as e:
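The jsonl_output records keep a reasoning_trace field (now always empty) so the serialization format stays stable. The code that writes the list to disk is outside this diff; a sketch of the usual JSONL convention the variable name implies, with the filename purely hypothetical:

import json

# Hypothetical write-out: one JSON object per line (the JSONL convention).
with open("answers.jsonl", "w", encoding="utf-8") as f:
    for record in jsonl_output:
        f.write(json.dumps(record) + "\n")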
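The core of the new __call__ is the "FINAL ANSWER:" split dictated by the prompt's output format. A self-contained illustration of that string logic (pure Python, no LLM call needed, made-up response text):

raw = "The capital of France is Paris.\nFINAL ANSWER: Paris"
if "FINAL ANSWER:" in raw:
    reasoning_trace = raw.split("FINAL ANSWER:")[0].strip()
    model_answer = raw.split("FINAL ANSWER:")[1].strip()
assert model_answer == "Paris"

Note that run_and_submit_all now submits only model_answer; the reasoning text is discarded at the call site, which matches the empty reasoning_trace written to the JSONL records above.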