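"""Gradio app for the HF Agents Course unit 4 scoring task: runs a smolagents
CodeAgent (Gemini via LiteLLM) on the fetched questions, optionally attaching
an uploaded code, Excel, or image file, and submits the answers for scoring."""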
import os
import gradio as gr
import requests
import pandas as pd
from PIL import Image
import base64
import io
import google.generativeai as genai
from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel
# System prompt (defined for reference; it is not currently passed to the agent)
SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
Report your thoughts, and finish your answer with just the answer — no prefixes like \"FINAL ANSWER:\".
Your answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
If you're asked for a number, don’t use commas or units like $ or %, unless specified.
If you're asked for a string, don’t use articles or abbreviations (e.g. for cities), and write digits in plain text unless told otherwise."""
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# Load GEMINI_API_KEY from environment
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
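# The key is typically configured as a Space secret / environment variable named GEMINI_API_KEY.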
# Agent wrapper
class MyAgent:
    def __init__(self):
        model = LiteLLMModel(model_id="gemini/gemini-1.5-flash", api_key=GEMINI_API_KEY)
        self.agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
    def __call__(self, question: str, code: str | None = None, excel_df: pd.DataFrame | None = None, image: Image.Image | None = None) -> str:
        if excel_df is not None:
            # Excel branch: include a small CSV preview of the sheet in the prompt.
            preview = excel_df.head().to_csv(index=False)
            context = f"This is a preview of the attached Excel sales data:\n\n{preview}"
            prompt = f"{question}\n\n{context}"
            return self.agent.run(prompt)
        elif image is not None:
            # Image branch: embed the image as base64-encoded JPEG text in the prompt.
            buffered = io.BytesIO()
            image.save(buffered, format="JPEG")
            img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
            prompt = f"{question}\n\nThis is the attached image (base64 JPEG):\n\n{img_b64}"
            return self.agent.run(prompt)
        elif code:
            # Code branch: append the attached source for the agent to analyze.
            formatted = f"{question}\n\nThoughts: Let's analyze the attached code.\nCode:\n```py\n{code}\n```<end_code>"
            return self.agent.run(formatted)
        else:
            return self.agent.run(question)
# Main evaluation function: fetch questions, run the agent on each, submit all answers.
def run_and_submit_all(profile: gr.OAuthProfile | None, uploaded_code: str | None, uploaded_excel: str | None, uploaded_image: str | None):
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please login to Hugging Face.", None

    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"
    try:
        agent = MyAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None
    # Read the optional attachments (passed as file paths by the gr.File components).
    uploaded_code_str = ""
    if uploaded_code:
        try:
            with open(uploaded_code, "r", encoding="utf-8") as f:
                uploaded_code_str = f.read()
        except Exception as e:
            uploaded_code_str = f"# Failed to load uploaded code: {e}"

    uploaded_excel_df = None
    if uploaded_excel:
        try:
            uploaded_excel_df = pd.read_excel(uploaded_excel)
        except Exception as e:
            print(f"Error reading Excel: {e}")
            uploaded_excel_df = None

    uploaded_image_obj = None
    if uploaded_image:
        try:
            uploaded_image_obj = Image.open(uploaded_image)
        except Exception as e:
            print(f"Error loading image: {e}")
            uploaded_image_obj = None
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            # Attach an upload only when the question text suggests it is needed.
            answer = agent(
                question_text,
                uploaded_code_str if "code" in question_text.lower() else None,
                uploaded_excel_df if "excel" in question_text.lower() or "spreadsheet" in question_text.lower() else None,
                uploaded_image_obj if "image" in question_text.lower() or "photo" in question_text.lower() or "jpg" in question_text.lower() else None
            )
            answers_payload.append({"task_id": task_id, "submitted_answer": answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": answer})
        except Exception as e:
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"AGENT ERROR: {e}"
            })

    if not answers_payload:
        return "Agent did not return any answers.", pd.DataFrame(results_log)
    submission_data = {
        "username": profile.username.strip(),
        "agent_code": f"https://huggingface.co/spaces/{space_id}/tree/main",
        "answers": answers_payload
    }
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission failed: {e}", pd.DataFrame(results_log)
# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
**Instructions:**
1. Clone this Space and configure your Gemini API key.
2. Log in to Hugging Face.
3. Optionally upload the Python code, Excel file, or image referenced by the questions.
4. Run your agent on the evaluation tasks and submit the answers.
""")
    gr.LoginButton()

    # Uploads are passed to the handler as file paths.
    code_upload = gr.File(label="Upload Python code file", file_types=[".py"], type="filepath")
    excel_upload = gr.File(label="Upload Excel file", file_types=[".xls", ".xlsx"], type="filepath")
    image_upload = gr.File(label="Upload Image file", file_types=[".jpg", ".jpeg"], type="filepath")

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Results", wrap=True)
    run_button.click(
        fn=run_and_submit_all,
        # The gr.OAuthProfile argument is injected automatically from the login state,
        # so only the file components are listed as inputs.
        inputs=[code_upload, excel_upload, image_upload],
        outputs=[status_output, results_table]
    )
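# Entry point when running the script directly (e.g. for local testing).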
if __name__ == "__main__":
    print("🔧 App starting...")
    demo.launch(debug=True, share=False)