import os
import gradio as gr
import requests
import pandas as pd
import json

# Import your upgraded agent
from agent import GeminiAgent

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MY_HF_USERNAME = "benjipeng"
ANSWERS_FILE = "answers.json"

# --- Logic for Running the Agent ---
def run_agent_only(profile: gr.OAuthProfile | None):
    """
    Fetches questions, runs the agent on them, and saves the answers to a file.
    This is the long-running part of the process.
    """
    if not profile or profile.username != MY_HF_USERNAME:
        yield "Error: Please log in as the correct user (`benjipeng`) to run the agent.", pd.DataFrame()
        return

    print("Starting agent run...")
    yield "Fetching questions...", pd.DataFrame()
    try:
        response = requests.get(f"{DEFAULT_API_URL}/questions", timeout=20)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        yield f"Error fetching questions: {e}", pd.DataFrame()
        return

    yield f"Fetched {len(questions_data)} questions. Initializing agent...", pd.DataFrame()
    agent = GeminiAgent()

    all_answers = []
    results_log = []
    for i, item in enumerate(questions_data):
        task_id = item.get("task_id")
        question_text = item.get("question")
        has_file = item.get("file", None) is not None

        status_message = f"Processing question {i + 1}/{len(questions_data)} (Task ID: {task_id})..."
        yield status_message, pd.DataFrame(results_log)

        modified_question = f"{question_text}\n\n[Agent Note: A file is attached.]" if has_file else question_text
        try:
            submitted_answer = agent(modified_question, task_id)
        except Exception as e:
            submitted_answer = f"AGENT ERROR: {e}"

        all_answers.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

        # Save progress incrementally
        with open(ANSWERS_FILE, 'w') as f:
            json.dump(all_answers, f, indent=2)

    yield f"Agent run complete. All {len(all_answers)} answers saved to {ANSWERS_FILE}. Ready to submit.", pd.DataFrame(results_log)

# --- Logic for Submitting Answers ---
def submit_saved_answers(profile: gr.OAuthProfile | None):
    """
    Reads the answers from the saved file and submits them to the scoring server.
    This is the fast part of the process.
    """
    if not profile or profile.username != MY_HF_USERNAME:
        return "Error: Please log in as the correct user (`benjipeng`) to submit."

    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    username = profile.username

    try:
        with open(ANSWERS_FILE, 'r') as f:
            answers_payload = json.load(f)
    except FileNotFoundError:
        return f"Error: Answers file '{ANSWERS_FILE}' not found. Please run the agent first."
    except json.JSONDecodeError:
        return "Error: Could not read the answers file. It might be corrupted."

    if not answers_payload:
        return "Error: Answers file is empty."

    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    submit_url = f"{DEFAULT_API_URL}/submit"
    print(f"Submitting {len(answers_payload)} answers for user '{username}'...")

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        return (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
    except requests.exceptions.HTTPError as e:
        return f"Submission Failed: Server responded with status {e.response.status_code}. Detail: {e.response.text}"
    except Exception as e:
        return f"An unexpected error occurred during submission: {e}"

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Gemini ReAct Agent for GAIA (Two-Step Submission)")
    gr.Markdown(
        """
        **Step 1: Run Agent & Save Answers**
        - This is the long-running step; it can take 10-20 minutes.
        - The agent answers all 20 questions and saves the results to a file.
        - Progress appears in the status box and the table below.

        **Step 2: Submit Saved Answers**
        - Once Step 1 is complete, click this button.
        - This step is fast: it sends your saved answers to be scored.
        """
    )

    gr.LoginButton()

    with gr.Row():
        run_button = gr.Button("Step 1: Run Agent & Save Answers")
        submit_button = gr.Button("Step 2: Submit Saved Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True, interactive=False)

    run_button.click(
        fn=run_agent_only,
        inputs=None,  # LoginButton profile is passed implicitly
        outputs=[status_output, results_table],
    )
    submit_button.click(
        fn=submit_saved_answers,
        inputs=None,  # LoginButton profile is passed implicitly
        outputs=[status_output],
    )

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    demo.launch(debug=True, share=False)
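
# ---------------------------------------------------------------------------
# Not part of app.py: a minimal sketch of the interface that agent.py is
# assumed to expose, inferred only from how GeminiAgent is used above (no-arg
# constructor, callable as agent(question, task_id), returning a string).
# The body is a hypothetical placeholder, not the real Gemini-backed agent.
#
#     # agent.py (sketch)
#     class GeminiAgent:
#         def __call__(self, question: str, task_id: str) -> str:
#             # A real implementation would run the ReAct loop against Gemini,
#             # fetching any file attached to this task_id before answering.
#             return "stub answer"
# ---------------------------------------------------------------------------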