import os
import gradio as gr
import requests
import pandas as pd
from smolagents import ToolCallingAgent, OpenAIServerModel
from audio_transcriber import AudioTranscriptionTool
from image_analyzer import ImageAnalysisTool
from wikipedia_searcher import WikipediaSearcher
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
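# Prompt prepended to every question; GAIA scoring uses exact-match answers, so the agent must return only the answer string.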
SYSTEM_PROMPT = (
"You are an agent solving the GAIA benchmark and must provide exact answers.\n"
"Rules:\n"
"1. Return only the exact requested answer: no explanation.\n"
"2. For yes/no, return 'Yes' or 'No'.\n"
"3. For dates, use the exact requested format.\n"
"4. For numbers, use only the number.\n"
"5. For names, use the exact name from sources.\n"
"6. If the question has a file, download it using the task ID.\n"
"Examples:\n"
"- '42'\n"
"- 'Arturo Nunez'\n"
"- 'Yes'\n"
"- 'October 5, 2001'\n"
"- 'Buenos Aires'\n"
"Never say 'the answer is...'. Only return the answer.\n"
)
class GaiaAgent:
def __init__(self):
print("Gaia Agent Initialized")
# Initialize the OpenAI GPT-3.5-turbo model via smolagents OpenAIServerModel
        self.model = OpenAIServerModel(
            model_id="gpt-3.5-turbo",
            api_key=os.getenv("OPENAI_API_KEY")  # Make sure OPENAI_API_KEY is set in your environment
        )
# Initialize the tools
self.tools = [
AudioTranscriptionTool(),
ImageAnalysisTool(),
WikipediaSearcher()
]
# Create the agent with tools and model
self.agent = ToolCallingAgent(
tools=self.tools,
model=self.model
)
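    # Run the agent on a single question and normalize its output to a plain answer string.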
def __call__(self, question: str) -> str:
print(f"Agent received question (first 50 chars): {question[:50]}...")
full_prompt = f"{SYSTEM_PROMPT}\nQUESTION:\n{question}"
try:
result = self.agent.run(full_prompt)
print(f"Raw result from agent: {result}")
if isinstance(result, dict) and "answer" in result:
return str(result["answer"]).strip()
elif isinstance(result, str):
return result.strip()
elif isinstance(result, list):
for item in reversed(result):
if isinstance(item, dict) and item.get("role") == "assistant" and "content" in item:
return item["content"].strip()
return "ERROR: Unexpected list format"
else:
return "ERROR: Unexpected result type"
except Exception as e:
print(f"Exception during agent run: {e}")
return f"AGENT ERROR: {e}"
def run_and_submit_all(profile: gr.OAuthProfile | None):
space_id = os.getenv("SPACE_ID")
if profile:
username = profile.username
print(f"User logged in: {username}")
else:
print("User not logged in.")
return "Please Login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
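    # The scoring server exposes GET /questions (fetch tasks) and POST /submit (score answers).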
try:
agent = GaiaAgent()
except Exception as e:
print(f"Error initializing agent: {e}")
return f"Error initializing agent: {e}", None
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(f"Agent code URL: {agent_code}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(questions_data)} questions.")
except Exception as e:
return f"Error fetching questions: {e}", None
results_log = []
answers_payload = []
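    # Answer each question, downloading any attached file first so the tools can read it from disk.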
for item in questions_data:
task_id = item.get("task_id")
if not task_id:
continue
question_text = item.get("question", "")
# Download associated file if any (mp3 or jpeg) according to GAIA benchmark task
file_url = item.get("file_url")
local_file_path = None
if file_url:
try:
ext = file_url.split(".")[-1].lower()
if ext in ["mp3", "wav", "jpeg", "jpg", "png"]:
local_file_path = f"./temp_{task_id}.{ext}"
with requests.get(file_url, stream=True) as r:
r.raise_for_status()
with open(local_file_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
print(f"Downloaded file for task {task_id} to {local_file_path}")
# Append info about the file path to the question so the agent knows to use it
question_text += f"\n\nFile path: {local_file_path}"
except Exception as e:
print(f"Failed to download file for task {task_id}: {e}")
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({
"Task ID": task_id,
"Question": question_text,
"Submitted Answer": submitted_answer
})
except Exception as e:
error_msg = f"AGENT ERROR: {e}"
results_log.append({
"Task ID": task_id,
"Question": question_text,
"Submitted Answer": error_msg
})
# Cleanup downloaded file
if local_file_path:
try:
os.remove(local_file_path)
except Exception:
pass
if not answers_payload:
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
submission_data = {
"username": username.strip(),
"agent_code": agent_code,
"answers": answers_payload
}
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
results_df = pd.DataFrame(results_log)
return final_status, results_df
except requests.exceptions.HTTPError as e:
try:
detail = e.response.json().get("detail", e.response.text)
except Exception:
detail = e.response.text[:500]
return f"Submission Failed: {detail}", pd.DataFrame(results_log)
except requests.exceptions.Timeout:
return "Submission Failed: The request timed out.", pd.DataFrame(results_log)
except Exception as e:
return f"An unexpected error occurred during submission: {e}", pd.DataFrame(results_log)
# Gradio UI
with gr.Blocks() as demo:
gr.Markdown("# Basic Agent Evaluation Runner")
gr.Markdown("""
**Instructions:**
1. Clone this space and define your agent and tools.
2. Log in to your Hugging Face account using the button below.
3. Click 'Run Evaluation & Submit All Answers' to test your agent and submit results.
""")
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
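    # No explicit inputs: Gradio injects the logged-in OAuthProfile automatically based on the type annotation on run_and_submit_all.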
run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
if __name__ == "__main__":
print("\n" + "-"*30 + " App Starting " + "-"*30)
space_host = os.getenv("SPACE_HOST")
space_id = os.getenv("SPACE_ID")
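    # SPACE_HOST and SPACE_ID are environment variables that Hugging Face sets automatically inside a running Space.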
if space_host:
print(f"✅ SPACE_HOST found: {space_host}")
print(f" Runtime URL should be: https://{space_host}.hf.space")
else:
print("ℹ️ SPACE_HOST not found.")
if space_id:
print(f"✅ SPACE_ID found: {space_id}")
print(f" Repo URL: https://huggingface.co/spaces/{space_id}")
else:
print("ℹ️ SPACE_ID not found.")
print("-"*(60 + len(" App Starting ")) + "\n")
demo.launch(debug=True, share=False)