import os
import gradio as gr
import requests
import pandas as pd
import google.generativeai as genai
from smolagents import CodeAgent, DuckDuckGoSearchTool
from smolagents.models import Model
# Define the system prompt
SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
Report your thoughts, then finish with only the final answer itself, without any prefix such as "FINAL ANSWER:".
Your answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
If you're asked for a number, don’t use commas or units like $ or %, unless specified.
If you're asked for a string, don’t use articles or abbreviations (e.g. for cities), and write digits in plain text unless told otherwise."""
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# Gemini model wrapper
class GeminiFlashModel(Model):
def __init__(self, model_name="gemini-1.5-flash", api_key=None):
self.model_name = model_name
self.api_key = api_key or os.getenv("GOOGLE_API_KEY")
if not self.api_key:
raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
genai.configure(api_key=self.api_key)
self.model = genai.GenerativeModel(model_name)
def generate(self, messages, stop_sequences=None, **kwargs):
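        # stop_sequences is accepted for interface compatibility but is not forwarded to Gemini.
        # Note: depending on the smolagents version, Model subclasses may be expected to
        # return a ChatMessage rather than a plain string.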
# Insert system prompt if missing
if isinstance(messages, list):
if not any(m["role"] == "system" for m in messages):
messages = [{"role": "system", "content": SYSTEM_PROMPT}] + messages
else:
raise TypeError("Expected 'messages' to be a list of message dicts")
# Convert messages to a single string (Gemini expects plain prompt)
prompt = "\n".join([f"{m['role'].capitalize()}: {m['content']}" for m in messages])
try:
response = self.model.generate_content(prompt)
return response.text.strip()
except Exception as e:
return f"GENERATION ERROR: {e}"
# Agent using Gemini-1.5-flash
class MyAgent:
def __init__(self):
self.model = GeminiFlashModel(model_name="gemini-1.5-flash")
self.agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=self.model)
def __call__(self, question: str) -> str:
return self.agent.run(question)
# Evaluation & submission flow
def run_and_submit_all(profile: gr.OAuthProfile | None):
space_id = os.getenv("SPACE_ID")
if profile:
username = profile.username
print(f"User logged in: {username}")
else:
print("User not logged in.")
return "Please login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
try:
agent = MyAgent()
except Exception as e:
return f"Error initializing agent: {e}", None
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(questions_data)} questions.")
except Exception as e:
return f"Error fetching questions: {e}", None
results_log = []
answers_payload = []
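    # Run the agent on each question; a failure on one task is logged and does not abort the run.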
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
continue
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
error_msg = f"AGENT ERROR: {e}"
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": error_msg})
if not answers_payload:
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
submission_data = {
"username": username.strip(),
"agent_code": agent_code,
"answers": answers_payload
}
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
results_df = pd.DataFrame(results_log)
return final_status, results_df
except requests.exceptions.HTTPError as e:
try:
detail = e.response.json().get("detail", e.response.text)
except Exception:
detail = e.response.text[:500]
return f"Submission Failed: {detail}", pd.DataFrame(results_log)
except requests.exceptions.Timeout:
return "Submission Failed: The request timed out.", pd.DataFrame(results_log)
except Exception as e:
return f"An unexpected error occurred during submission: {e}", pd.DataFrame(results_log)
# Gradio UI
with gr.Blocks() as demo:
gr.Markdown("# Basic Agent Evaluation Runner")
gr.Markdown("""
**Instructions:**
1. Clone this space and modify it to define your agent's logic.
2. Log in with Hugging Face.
3. Click 'Run Evaluation & Submit All Answers' to run and submit.
""")
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
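    # Gradio fills the gr.OAuthProfile argument automatically from the login state, so no inputs are needed.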
run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
if __name__ == "__main__":
print("\n" + "="*10 + " App Startup " + "="*10)
space_host = os.getenv("SPACE_HOST")
space_id = os.getenv("SPACE_ID")
if space_host:
print(f"✅ SPACE_HOST: {space_host} -> https://{space_host}.hf.space")
else:
print("ℹ️ SPACE_HOST not set.")
if space_id:
print(f"✅ SPACE_ID: {space_id}")
else:
print("ℹ️ SPACE_ID not set.")
demo.launch(debug=True, share=False)