import os
import time
import traceback

import gradio as gr
import pandas as pd
import requests
from dotenv import load_dotenv

from functions import *  # project-local helpers; provides build_graph()
from langchain_core.messages import HumanMessage

load_dotenv()

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
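# The scoring service exposes the two endpoints used below (inferred from how
# the responses are consumed in this file):
#   GET  {api_url}/questions -> list of {"task_id": ..., "question": ...} items
#   POST {api_url}/submit    -> accepts {"username", "agent_code", "answers"}
#                               and returns the score for the run.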
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Run the agent on all fetched questions, then submit the answers for scoring."""
    space_id = os.getenv("SPACE_ID")

    if not profile:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None
    username = profile.username
    print(f"User logged in: {username}")

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Instantiate the agent graph
    try:
        graph = build_graph()
        agent = graph.invoke
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Repo URL not available"
    print(f"Agent code repo: {agent_code}")
    # Fetch questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    results_log = []
    answers_payload = []

    print(f"\n{'='*60}")
    print(f"Running agent on {len(questions_data)} questions...")
    print(f"{'='*60}\n")

    # Delay between questions to avoid rate limiting
    question_delay = 3.0  # seconds between questions

    for idx, item in enumerate(questions_data, 1):
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue

        # Pause between questions (except before the first one)
        if idx > 1:
            print(f"Waiting {question_delay}s before next question to avoid rate limits...")
            time.sleep(question_delay)

        print(f"\n--- Question {idx}/{len(questions_data)} ---")
        print(f"Task ID: {task_id}")
        print(f"Question: {question_text}")
        try:
            # Time each question
            start_time = time.time()
            input_messages = [HumanMessage(content=question_text)]

            # Invoke the agent with the question
            result = agent({"messages": input_messages})

            # Extract the answer: take the last message with string content
            # that is not a planner directive (e.g. "SEARCH: ...")
            answer = "UNKNOWN"
            if "messages" in result and result["messages"]:
                for msg in reversed(result["messages"]):
                    if hasattr(msg, "content") and isinstance(msg.content, str) and msg.content.strip():
                        if not any(msg.content.upper().startswith(prefix) for prefix in
                                   ["SEARCH:", "CALCULATE:", "DEFINE:", "WIKIPEDIA:", "REVERSE:", "DIRECT:"]):
                            answer = msg.content.strip()
                            break

            elapsed_time = time.time() - start_time
            print(f"Answer: {answer}")
            print(f"Time taken: {elapsed_time:.2f}s")

            answers_payload.append({"task_id": task_id, "submitted_answer": answer})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
                "Submitted Answer": answer,
                "Time (s)": f"{elapsed_time:.2f}",
            })
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            print(f"Traceback: {traceback.format_exc()}")
            # Still submit UNKNOWN for errors
            answers_payload.append({"task_id": task_id, "submitted_answer": "UNKNOWN"})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
                "Submitted Answer": f"ERROR: {str(e)[:50]}",
                "Time (s)": "N/A",
            })
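    # At this point every fetched task_id has exactly one entry in
    # answers_payload (a real answer or "UNKNOWN"), so the submission below
    # always covers the full question set.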
print(f"\n{'='*60}") | |
print(f"Completed processing all questions") | |
print(f"{'='*60}\n") | |
if not answers_payload: | |
print("Agent did not produce any answers to submit.") | |
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log) | |
# Summary before submission | |
unknown_count = sum(1 for ans in answers_payload if ans["submitted_answer"] == "UNKNOWN") | |
print(f"\nSummary before submission:") | |
print(f"Total questions: {len(answers_payload)}") | |
print(f"UNKNOWN answers: {unknown_count}") | |
print(f"Attempted answers: {len(answers_payload) - unknown_count}") | |
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload} | |
print(f"\nSubmitting {len(answers_payload)} answers for user '{username}'...") | |
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()

        score = result_data.get('score', 0)
        correct_count = result_data.get('correct_count', 0)
        total_attempted = result_data.get('total_attempted', 0)

        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {score}% "
            f"({correct_count}/{total_attempted} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )

        print("\n" + "=" * 60)
        print("SUBMISSION RESULTS:")
        print(f"Score: {score}%")
        print(f"Correct: {correct_count}/{total_attempted}")
        print("=" * 60)

        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# Enhanced GAIA Agent Evaluation Runner")
    gr.Markdown(
        """
        This enhanced agent is optimized for GAIA benchmark questions with improved:
        - Planning logic for better tool selection
        - Search capabilities with more comprehensive results
        - Mathematical expression parsing
        - Answer extraction from search results
        - Error handling and logging

        Target: >50% accuracy on GAIA questions
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table],
    )
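    # Note: `inputs` is intentionally omitted above. Because run_and_submit_all
    # is annotated with `gr.OAuthProfile | None` and the UI includes
    # gr.LoginButton(), Gradio passes the logged-in user's OAuth profile (or
    # None) into that parameter automatically.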
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)

    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"  SPACE_HOST found: {space_host_startup}")
        print(f"  Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:
        print(f"  SPACE_ID found: {space_id_startup}")
        print(f"  Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"  Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Enhanced GAIA Agent Evaluation...")
    demo.launch(debug=True, share=False)