Upload 3 files

Changed files:
- app.py +59 -369
- models.py +41 -0
- settings.py +29 -0

app.py
CHANGED
@@ -1,373 +1,50 @@
-from
-from
-import
-import
-import
-import
-import
-import
-from google import genai
-from google.genai import types
-from tools.final_answer import FinalAnswerTool
-from tools.visit_webpage import VisitWebpageTool
-
-
 import os
-import gradio as gr
-import requests
-import inspect
 import pandas as pd
 
-# (Keep Constants as is)
-# --- Constants ---
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-# --- Basic Agent Definition ---
-# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
-BASE_PYTHON_TOOLS["open"] = open
-
-visit_webpage_tool = VisitWebpageTool()
-final_answer = FinalAnswerTool()
-
-# model = LiteLLMModel(
-#     model_id="gemini/gemini-2.0-flash-lite",
-#     temperature=0.0,
-#     api_key=os.environ.get("GEMINI_KEY")
-# )
-model = LiteLLMModel(model_id="openrouter/qwen/qwen3-30b-a3b:free")
-
-# Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
-
-class ImageDescriptionTool(Tool):
-    name = "image_description"
-    description = "This is a tool that will describe a local image file."
-    inputs = {
-        "file_name": {
-            "type": "string",
-            "description": "Complete name of the local file to describe, for example: /files/98c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea8.png",
-        }
-    }
-    output_type = "string"
-
-    def __init__(self):
-        super().__init__()
-        self.client = genai.Client(api_key=os.environ.get("GEMINI_KEY"))
-
-    def forward(self, file_name: str):
-        try:
-            image_file = self.client.files.upload(file=f"{file_name}")
-            image_description = self.client.models.generate_content(
-                model="gemini-2.0-flash", contents=["Describe this image file", image_file]
-            )
-            return image_description.text
-        except Exception as e:
-            print(f"Error getting image description: {e}")
-            return False
-
-image_description_tool = ImageDescriptionTool()
-
-class VideoPromptTool(Tool):
-    name = "video_prompt"
-    description = "This is a tool for prompting a YouTube video with questions to understand its content."
-    inputs = {
-        "youtube_url": {
-            "type": "string",
-            "description": "URL of the YouTube video to prompt, for example: https://www.youtube.com/watch?v=9hE5-98ZeCg",
-        },
-        "prompt": {
-            "type": "string",
-            "description": "A question about the video, for example: Please summarize the video in 3 sentences.",
-        }
-    }
-    output_type = "string"
-
-    def __init__(self):
-        super().__init__()
-        self.client = genai.Client(api_key=os.environ.get("GEMINI_KEY"))
-
-    def forward(self, youtube_url: str, prompt: str):
-        try:
-            video_description = self.client.models.generate_content(
-                model='models/gemini-2.0-flash',
-                contents=types.Content(
-                    parts=[
-                        types.Part(
-                            file_data=types.FileData(file_uri=youtube_url)
-                        ),
-                        types.Part(text=prompt)
-                    ]
-                )
-            )
-            return video_description.text
-        except Exception as e:
-            print(f"Error understanding video: {e}")
-            return False
-
-video_prompt_tool = VideoPromptTool()
-
-class AudioDescriptionTool(Tool):
-    name = "audio_description"
-    description = "This is a tool that will describe a local audio clip."
-    inputs = {
-        "file_name": {
-            "type": "string",
-            "description": "Complete name of the local file to describe, for example: /files/98c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea8.mp3",
-        }
-    }
-    output_type = "string"
-
-    def __init__(self):
-        super().__init__()
-        self.client = genai.Client(api_key=os.environ.get("GEMINI_KEY"))
-
-    def forward(self, file_name: str):
-        try:
-            mp3_file = self.client.files.upload(file=f"{file_name}")
-            audio_description = self.client.models.generate_content(
-                model="gemini-2.0-flash", contents=["Describe this audio clip", mp3_file]
-            )
-            return audio_description.text
-        except Exception as e:
-            print(f"Error getting audio description: {e}")
-            return False
-
-audio_description_tool = AudioDescriptionTool()
-
-# class FetchFileTool(Tool):
-# fetch_file_tool = FetchFileTool()
-
-class WikipediaSearchTool(Tool):
-    name = "wikipedia_search"
-    description = "Fetches a summary of a Wikipedia page based on a given search query (only one word or group of words)."
-    inputs = {
-        "query": {"type": "string", "description": "The search term for the Wikipedia page (only one word or group of words)."}
-    }
-    output_type = "string"
-
-    def __init__(self, lang="en"):
-        super().__init__()
-        self.wiki = wikipediaapi.Wikipedia(
-            language=lang, user_agent="MinimalAgent/1.0")
-
-    def forward(self, query: str):
-        page = self.wiki.page(query)
-        if not page.exists():
-            return "No Wikipedia page found."
-        return page.summary[:1000]
158 |
|
159 |
-
|
|
|
160 |
|
161 |
-
|
162 |
-
|
163 |
-
#Keep this format for the description / args / args description but feel free to modify the tool
|
164 |
-
"""A tool that provides web search via duckduckgo
|
165 |
-
Args:
|
166 |
-
arg1: the first argument
|
167 |
-
arg2: the second argument
|
168 |
-
"""
|
169 |
-
search_tool = DuckDuckGoSearchTool()
|
170 |
-
return search_tool(arg1)
|
171 |
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
Args:
|
176 |
-
timezone: A string representing a valid timezone (e.g., 'America/New_York').
|
177 |
-
"""
|
178 |
-
try:
|
179 |
-
# Create timezone object
|
180 |
-
tz = pytz.timezone(timezone)
|
181 |
-
# Get current time in that timezone
|
182 |
-
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
|
183 |
-
return f"The current local time in {timezone} is: {local_time}"
|
184 |
-
except Exception as e:
|
185 |
-
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
186 |
|
187 |
-
|
188 |
-
|
-        with open("prompts.yaml", 'r') as stream:
-            prompt_templates = yaml.safe_load(stream)
-        self.agent = CodeAgent(
-            model=model,
-            tools=[final_answer, search_tool, visit_webpage_tool, image_description_tool, audio_description_tool, video_prompt_tool],
-            max_steps=10,
-            verbosity_level=1,
-            additional_authorized_imports=[
-                "unicodedata",
-                "stat",
-                "datetime",
-                "random",
-                "pandas",
-                "itertools",
-                "math",
-                "statistics",
-                "queue",
-                "time",
-                "collections",
-                "re"
-            ],
-            grammar=None,
-            planning_interval=None,
-            name=None,
-            description=None,
-            prompt_templates=prompt_templates
-        )
-        print("BasicAgent initialized.")
-    def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        # fixed_answer = "This is a default answer."
-        final_answer = self.agent.run(question)
-        print(f"Agent returning final answer: {final_answer}")
-        return final_answer
 
-def run_and_submit_all(profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
-    # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
 
-    if profile:
-        username = f"{profile.username}"
-        print(f"User logged in: {username}")
     else:
-        print("User not logged in.")
-        return "Please Login to Hugging Face with the button.", None
-
-    api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
-
-    # 1. Instantiate Agent (modify this part to create your agent)
-    try:
-        agent = BasicAgent()
-    except Exception as e:
-        print(f"Error instantiating agent: {e}")
-        return f"Error initializing agent: {e}", None
-    # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
-    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-    print(agent_code)
-
-    # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
-    try:
-        # response = requests.get(questions_url, timeout=15)
-        # response.raise_for_status()
-        # questions_data = response.json()
 
-
-        # if not questions_data:
-        #     print("Fetched questions list is empty.")
-        #     return "Fetched questions list is empty or invalid format.", None
-        # print(f"Fetched {len(questions_data)} questions.")
-    # except requests.exceptions.RequestException as e:
-    #     print(f"Error fetching questions: {e}")
-    #     return f"Error fetching questions: {e}", None
-    # except requests.exceptions.JSONDecodeError as e:
-    #     print(f"Error decoding JSON response from questions endpoint: {e}")
-    #     print(f"Response text: {response.text[:500]}")
-    #     return f"Error decoding server response for questions: {e}", None
-    except Exception as e:
-        print(f"An unexpected error occurred fetching questions: {e}")
-        return f"An unexpected error occurred fetching questions: {e}", None
 
-
# 3. Run your Agent
|
277 |
-
results_log = []
|
278 |
-
answers_payload = []
|
279 |
-
print(f"Running agent on {len(questions_data)} questions...")
|
280 |
-
for item in questions_data:
|
281 |
-
task_id = item.get("task_id")
|
282 |
-
question_text = item.get("question") + " think hard to answer."
|
283 |
-
if not task_id or question_text is None:
|
284 |
-
print(f"Skipping item with missing task_id or question: {item}")
|
285 |
-
continue
|
286 |
-
try:
|
287 |
-
# check if the file_name is not empty
|
288 |
-
if item.get("file_name"):
|
289 |
-
# question_text = f"{question_text} Here is the file: https://agents-course-unit4-scoring.hf.space/files/{item.get('task_id')}"
|
290 |
-
question_text = f"{question_text} Here is the local file path: files/{item.get('file_name')}"
|
291 |
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
# chess image
|
296 |
-
# if item.get("task_id") == "cca530fc-4052-43b2-b130-b30968d8aa44":
|
297 |
-
|
298 |
-
# python code
|
299 |
-
# if item.get("task_id") == "f918266a-b3e0-4914-865d-4faa564f1aef":
|
300 |
-
# submitted_answer = agent(question_text)
|
301 |
-
|
302 |
-
# youtube video
|
303 |
-
# if item.get("task_id") == "a1e91b78-d3d8-4675-bb8d-62741b4b68a6":
|
304 |
-
# submitted_answer = agent(question_text)
|
305 |
-
# else:
|
306 |
-
# continue
|
307 |
-
|
308 |
-
submitted_answer = agent(question_text)
|
309 |
-
|
310 |
-
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
311 |
-
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
312 |
-
# Sleep to avoid the Gemini throttling at 15 RPM
|
313 |
-
time.sleep(45)
|
314 |
-
except Exception as e:
|
315 |
-
print(f"Error running agent on task {task_id}: {e}")
|
316 |
-
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
|
317 |
-
|
318 |
-
if not answers_payload:
|
319 |
-
print("Agent did not produce any answers to submit.")
|
320 |
-
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
|
321 |
-
|
322 |
-
# return "Questions parsed.", pd.DataFrame(results_log)
|
323 |
-
|
324 |
-
# 4. Prepare Submission
|
325 |
-
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
|
326 |
-
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
|
327 |
-
print(status_update)
|
328 |
-
|
329 |
-
# 5. Submit
|
330 |
-
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
|
331 |
-
try:
|
332 |
-
response = requests.post(submit_url, json=submission_data, timeout=60)
|
333 |
-
response.raise_for_status()
|
334 |
-
result_data = response.json()
|
335 |
-
final_status = (
|
336 |
-
f"Submission Successful!\n"
|
337 |
-
f"User: {result_data.get('username')}\n"
|
338 |
-
f"Overall Score: {result_data.get('score', 'N/A')}% "
|
339 |
-
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
|
340 |
-
f"Message: {result_data.get('message', 'No message received.')}"
|
341 |
-
)
|
342 |
-
print("Submission successful.")
|
343 |
-
results_df = pd.DataFrame(results_log)
|
344 |
-
return final_status, results_df
|
345 |
-
except requests.exceptions.HTTPError as e:
|
346 |
-
error_detail = f"Server responded with status {e.response.status_code}."
|
347 |
-
try:
|
348 |
-
error_json = e.response.json()
|
349 |
-
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
|
350 |
-
except requests.exceptions.JSONDecodeError:
|
351 |
-
error_detail += f" Response: {e.response.text[:500]}"
|
352 |
-
status_message = f"Submission Failed: {error_detail}"
|
353 |
-
print(status_message)
|
354 |
-
results_df = pd.DataFrame(results_log)
|
355 |
-
return status_message, results_df
|
356 |
-
except requests.exceptions.Timeout:
|
357 |
-
status_message = "Submission Failed: The request timed out."
|
358 |
-
print(status_message)
|
359 |
-
results_df = pd.DataFrame(results_log)
|
360 |
-
return status_message, results_df
|
361 |
-
except requests.exceptions.RequestException as e:
|
362 |
-
status_message = f"Submission Failed: Network error - {e}"
|
363 |
-
print(status_message)
|
364 |
-
results_df = pd.DataFrame(results_log)
|
365 |
-
return status_message, results_df
|
366 |
-
except Exception as e:
|
367 |
-
status_message = f"An unexpected error occurred during submission: {e}"
|
368 |
-
print(status_message)
|
369 |
-
results_df = pd.DataFrame(results_log)
|
370 |
-
return status_message, results_df
|
371 |
|
372 |
|
373 |
# --- Build Gradio Interface using Blocks ---
|
@@ -390,37 +67,50 @@ with gr.Blocks() as demo:
 
     gr.LoginButton()
 
-
 
-    status_output = gr.Textbox(
-
-    results_table = gr.DataFrame(
 
-
-        fn=
-        outputs=[
     )
 
 if __name__ == "__main__":
     print("\n" + "-"*30 + " App Starting " + "-"*30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
-        print(
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(
     else:
         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
     print("-"*(60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from openinference.instrumentation.smolagents import SmolagentsInstrumentor
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry import trace
+from evaluator import Evaluator
+from runner import Runner
+from settings import Settings
 import os
 import pandas as pd
+import gradio as gr
+import logging
+logging.basicConfig(level=logging.INFO, force=True)
+logger = logging.getLogger(__name__)
+settings = Settings()
+evaluator = Evaluator(settings)
+runner = Runner(settings)
 
 
+# Create a TracerProvider for OpenTelemetry
+trace_provider = TracerProvider()
 
+# Add a SimpleSpanProcessor with the OTLPSpanExporter to send traces
+trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))
 
+# Set the global default tracer provider
+trace.set_tracer_provider(trace_provider)
+tracer = trace.get_tracer(__name__)
 
+# Instrument smolagents with the configured provider
+SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)
 
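A note on configuration: OTLPSpanExporter() is constructed with no arguments, so it reads its target and credentials from the OTEL_EXPORTER_OTLP_ENDPOINT and OTEL_EXPORTER_OTLP_HEADERS environment variables; settings.py below declares the endpoint field and writes the headers. A sketch of the equivalent explicit construction — the Langfuse endpoint URL and keys are placeholders, not values from this commit:

    # Explicit equivalent of the env-var-driven OTLPSpanExporter() above (sketch).
    import base64
    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

    auth = base64.b64encode(b"pk-lf-...:sk-lf-...").decode()  # placeholder Langfuse keys
    exporter = OTLPSpanExporter(
        endpoint="https://cloud.langfuse.com/api/public/otel/v1/traces",  # assumed endpoint
        headers={"Authorization": f"Basic {auth}"},
    )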
+def run(test_mode=False) -> pd.DataFrame:
+    if test_mode:
+        questions = [evaluator.get_one_question()]
+        # questions = [get_one_question(task_id='8e867cd7-cff9-4e6c-867a-ff5ddc2550be')]
+        # questions = [get_one_question('3f57289b-8c60-48be-bd80-01f8099ca449')]
+        # questions = [get_one_question('cca530fc-4052-43b2-b130-b30968d8aa44')]
     else:
+        questions = evaluator.get_questions()
 
+    return runner.run_agent(questions)
 
 
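run() doubles as the click handler for both run buttons and as a plain function; a usage sketch:

    df = run(test_mode=True)   # fetch and answer a single question
    df = run()                 # full evaluation over all questions
    print(df)                  # the DataFrame that populates results_table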
+def submit():
+    return evaluator.submit_answers()
 
 
 # --- Build Gradio Interface using Blocks ---
 
     gr.LoginButton()
 
+    run_one_button = gr.Button("Get One Answer")
+    run_all_button = gr.Button("Run Full Evaluation")
+    submit_button = gr.Button("Submit All Answers")
 
+    status_output = gr.Textbox(
+        label="Run Status / Submission Result", lines=5, interactive=False)
+    results_table = gr.DataFrame(
+        label="Questions and Agent Answers", wrap=True)
 
+    run_one_button.click(
+        fn=run, inputs=[gr.Checkbox(value=True, visible=False)],
+        outputs=[results_table]
+    )
+    run_all_button.click(
+        fn=run, inputs=[gr.Checkbox(value=False, visible=False)],
+        outputs=[results_table]
+    )
+    submit_button.click(
+        fn=submit,
+        outputs=[status_output]
     )
 
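The hidden gr.Checkbox inputs exist only to feed a constant test_mode value into run(), since Gradio passes each input component's value to the handler as a positional argument. An alternative sketch that drops the dummy components, assuming the same run() signature:

    from functools import partial

    run_one_button.click(fn=partial(run, True), outputs=[results_table])
    run_all_button.click(fn=partial(run, False), outputs=[results_table])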
 if __name__ == "__main__":
     print("\n" + "-"*30 + " App Starting " + "-"*30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
+        print(
+            f" Runtime URL should be: https://{space_host_startup}.hf.space")
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+        print(
+            f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
     else:
         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
     print("-"*(60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
+    demo.launch(debug=True, share=False)
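app.py imports Evaluator and Runner from evaluator.py and runner.py, which are not among the three uploaded files. A hypothetical sketch of the interface the call sites above assume; only the method names and the Settings argument are grounded in this diff:

    # Interface implied by app.py's call sites (hypothetical sketch).
    import pandas as pd
    from models import Question
    from settings import Settings

    class Evaluator:
        def __init__(self, settings: Settings) -> None: ...
        def get_one_question(self, task_id: str | None = None) -> Question: ...
        def get_questions(self) -> list[Question]: ...
        def submit_answers(self) -> str: ...  # status string shown in status_output

    class Runner:
        def __init__(self, settings: Settings) -> None: ...
        def run_agent(self, questions: list[Question]) -> pd.DataFrame: ...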
models.py
ADDED
@@ -0,0 +1,41 @@
+from enum import StrEnum
+from pydantic import BaseModel, ConfigDict
+
+
+class GoogleModelID(StrEnum):
+    GEMINI_2_0_FLASH = "gemini-2.0-flash"
+    GEMINI_2_5_FLASH_PREVIEW = "gemini-2.5-flash-preview"
+
+class OpenRouterModelID(StrEnum):
+    QWEN_3_14B_FREE = "openrouter/qwen/qwen3-14b:free"
+    GPT_4_1_MINI = "openrouter/openai/gpt-4.1-mini"
+    GPT_O4_MINI = "openrouter/openai/o4-mini"
+    GROK_3_MINI_BETA = "openrouter/x-ai/grok-3-mini-beta"
+    GROK_3_BETA = "openrouter/x-ai/grok-3-beta"
+
+class Question(BaseModel):
+    model_config = ConfigDict(validate_by_name=True, validate_by_alias=True)
+    task_id: str
+    question: str
+    file_name: str
+
+class Answer(BaseModel):
+    task_id: str
+    answer: str
+
+class QuestionAnswerPair(BaseModel):
+    task_id: str
+    question: str
+    answer: str
+
+    def get_answer(self) -> dict[str, str]:
+        return {"task_id": self.task_id, "submitted_answer": self.answer}
+
+class Results(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+    username: str
+    score: int
+    correct_count: int
+    total_attempted: int
+    message: str
+    timestamp: str
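A usage sketch for these models: parsing a question payload from the scoring API and shaping an answer for submission. The task_id is one that appears commented in app.py; the question text is a placeholder:

    from models import Question, QuestionAnswerPair

    item = {"task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
            "question": "example question text", "file_name": ""}
    q = Question.model_validate(item)  # pydantic v2 validation
    pair = QuestionAnswerPair(task_id=q.task_id, question=q.question, answer="42")
    payload = [pair.get_answer()]      # [{"task_id": "...", "submitted_answer": "42"}]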
settings.py
ADDED
@@ -0,0 +1,29 @@
+import base64
+import os
+from pydantic_settings import BaseSettings, SettingsConfigDict
+from pydantic import HttpUrl, SecretStr
+
+
+class Settings(BaseSettings):
+    def __init__(self):
+        super().__init__()
+        self.set_langfuse_auth()
+    model_config = SettingsConfigDict(env_file='.env')
+    scoring_api_base_url: HttpUrl = HttpUrl(
+        "https://agents-course-unit4-scoring.hf.space"
+    )
+    chess_eval_url: HttpUrl = HttpUrl(
+        "https://stockfish.online/api/s/v2.php"
+    )
+    gemini_api_key: SecretStr
+    langfuse_public_key: SecretStr
+    langfuse_secret_key: SecretStr
+    openrouter_api_key: SecretStr
+    otel_exporter_otlp_endpoint: HttpUrl
+    serper_api_key: SecretStr
+    space_id: str
+    username: str
+
+    def set_langfuse_auth(self):
+        LANGFUSE_AUTH = base64.b64encode(f"{self.langfuse_public_key.get_secret_value()}:{self.langfuse_secret_key.get_secret_value()}".encode()).decode()
+        os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"