import os
import gradio as gr
import requests
import pandas as pd
import time
import re
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.agents import AgentExecutor, create_react_agent
from langchain.memory import ConversationSummaryMemory
from typing import List, Optional
# === TOOL IMPORTS ===
from helper import repl_tool, file_saver_tool, audio_transcriber_tool, gemini_multimodal_tool, wikipedia_search_tool2
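# The imported tools (Python REPL, file saver, audio transcriber, Gemini
# multimodal tool, Wikipedia search) are project-local and defined in this
# Space's helper.py.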
# Constants
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Prompt ---
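# Note: create_react_agent expects 'tools', 'tool_names' and 'agent_scratchpad'
# among the prompt variables, so the rule/example text cut out below should
# include the matching {tools} / {tool_names} placeholders.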
prompt = PromptTemplate(
    input_variables=["input", "agent_scratchpad", "chat_history", "tools", "tool_names"],
    template="""
You are a smart and helpful AI Agent/Assistant that excels at fact-based reasoning. You are allowed and encouraged to use one or more tools as needed to answer complex questions and perform tasks.
[ ...cut for brevity: insert your strict format rules and examples here ... ]
{chat_history}
New input: {input}
---
{agent_scratchpad}
""",
)
# === AGENT DEFINITION ===
class BasicAgent:
    def __init__(
        self,
        agent, tools: List, verbose: bool = False, handle_parsing_errors: bool = True,
        max_iterations: int = 9, memory: Optional[ConversationSummaryMemory] = None
    ):
        self.agent = agent
        self.tools = tools
        self.verbose = verbose
        self.handle_parsing_errors = handle_parsing_errors
        self.max_iterations = max_iterations
        self.memory = memory
        self.agent_obj = AgentExecutor(
            agent=self.agent,
            tools=self.tools,
            verbose=self.verbose,
            handle_parsing_errors=self.handle_parsing_errors,
            max_iterations=self.max_iterations,
            memory=self.memory
        )

    def __call__(self, question: str) -> str:
        result = self.agent_obj.invoke(
            {"input": question},
            config={"configurable": {"session_id": "test-session"}},
        )
        return result['output']
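# Fetches the evaluation questions, runs the agent on each task (appending the
# task file URL when an attachment is provided), and submits all answers to the
# scoring API.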
def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # OpenAI API key only!
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        print("OpenAI API key not found in environment variables.")
        return "OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.", None

    # Use GPT-4o (or another allowed OpenAI model)
    llm_client = ChatOpenAI(model='gpt-4o', temperature=0, api_key=openai_api_key)
    # Tools: only offline tools or tools that don't require additional API keys
    tools = [
        repl_tool,
        file_saver_tool,
        audio_transcriber_tool,
        gemini_multimodal_tool,  # Keep only if this is purely local or adapted for OpenAI images; otherwise remove!
        wikipedia_search_tool2,
    ]
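    # ConversationSummaryMemory condenses earlier turns and feeds them to the
    # prompt via memory_key="chat_history", matching the {chat_history} placeholder.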
    summary_memory = ConversationSummaryMemory(llm=llm_client, memory_key="chat_history")

    summary_react_agent = create_react_agent(
        llm=llm_client,
        tools=tools,
        prompt=prompt
    )

    # 1. Instantiate Agent
    try:
        agent = BasicAgent(summary_react_agent, tools, True, True, 30, summary_memory)
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name")

        full_question_for_agent = question_text
        if file_name:
            attachment_url = f"{DEFAULT_API_URL}/files/{task_id}"
            full_question_for_agent += f"\n\nAttachment '{file_name}' available at EXACT URL: {attachment_url}"

        print(f"Running agent on task {task_id}: {full_question_for_agent}", flush=True)
        try:
            submitted_answer = agent(full_question_for_agent)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
            time.sleep(2)  # Decrease or remove if not rate-limited!
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        cleaned_final_status = re.sub(r'[^\x20-\x7E\n\r\t]+', '', final_status).strip()
        results_df = pd.DataFrame(results_log)
        return cleaned_final_status, results_df
    except Exception as e:
        print(f"Error submitting answers: {e}")
        results_df = pd.DataFrame(results_log)
        return f"Submission Failed: {e}", results_df
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
**Instructions:**
1. Log in to your Hugging Face account using the button below.
2. Click 'Run Evaluation & Submit All Answers' to fetch the questions, run your agent, submit the answers, and see your score.
---
**Note:** Only an OpenAI API key is needed!
"""
    )

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
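    # No explicit inputs are wired here: Gradio injects the OAuth profile
    # automatically because run_and_submit_all's first parameter is annotated
    # as gr.OAuthProfile | None.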
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
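# Local usage note: only OPENAI_API_KEY has to be set by hand; SPACE_HOST and
# SPACE_ID are injected automatically when the app runs inside a Hugging Face Space.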
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)