Update app.py
app.py CHANGED
@@ -3,302 +3,34 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
-import re
-import json
-import math
-import time
-from typing import Dict, Any, List, Optional, Union
-
+from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-# --- Tool Definitions ---
-class Tools:
-    @staticmethod
-    def calculator(expression: str) -> Union[float, str]:
-        """Safely evaluate mathematical expressions"""
-        # Clean the expression to only contain valid math operations
-        try:
-            # Extract numbers and operators
-            safe_expr = re.sub(r'[^0-9+\-*/().%\s]', '', expression)
-            # Calculate using a safer approach than eval()
-            # Use a restricted namespace for evaluation
-            safe_globals = {"__builtins__": {}}
-            safe_locals = {"math": math}
-            # Add basic math functions
-            for func in ['sin', 'cos', 'tan', 'sqrt', 'log', 'exp', 'floor', 'ceil']:
-                safe_locals[func] = getattr(math, func)
-
-            result = eval(safe_expr, safe_globals, safe_locals)
-            return result
-        except Exception as e:
-            return f"Error in calculation: {str(e)}"
-
-    @staticmethod
-    def search(query: str) -> str:
-        """Simulate a web search with predefined responses for common queries"""
-        # This is a mock search function - in a real scenario, you might
-        # use a proper search API like SerpAPI or DuckDuckGo
-        knowledge_base = {
-            "population": "The current world population is approximately 8 billion people.",
-            "capital of france": "The capital of France is Paris.",
-            "largest planet": "Jupiter is the largest planet in our solar system.",
-            "tallest mountain": "Mount Everest is the tallest mountain above sea level at 8,848.86 meters.",
-            "deepest ocean": "The Mariana Trench is the deepest ocean trench, located in the Pacific Ocean.",
-            "president": "The current president of the United States is Joe Biden (as of 2024).",
-            "water boiling point": "Water boils at 100 degrees Celsius (212 degrees Fahrenheit) at standard pressure.",
-            "pi": "The mathematical constant pi (π) is approximately 3.14159.",
-            "speed of light": "The speed of light in vacuum is approximately 299,792,458 meters per second.",
-            "human body temperature": "Normal human body temperature is around 37 degrees Celsius (98.6 degrees Fahrenheit)."
-        }
-
-        # Try to find a relevant answer in our knowledge base
-        for key, value in knowledge_base.items():
-            if key in query.lower():
-                return value
-
-        return "No relevant information found in the knowledge base."
-
-    @staticmethod
-    def date_info() -> str:
-        """Provide the current date"""
-        return time.strftime("%Y-%m-%d")
-
-# --- LLM Interface ---
-class LLMInterface:
-    @staticmethod
-    def query_llm(prompt: str) -> str:
-        """Query a free LLM through Hugging Face's inference API"""
-        try:
-            # Using a smaller, more reliable free model
-            API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
-            # Alternative models you can try if this one doesn't work:
-            # - "distilbert-base-uncased-finetuned-sst-2-english"
-            # - "gpt2"
-            # - "microsoft/DialoGPT-medium"
-
-            headers = {"Content-Type": "application/json"}
-
-            # Use a well-formatted prompt
-            payload = {
-                "inputs": prompt,
-                "parameters": {"max_length": 100, "do_sample": False}
-            }
-
-            response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
-
-            if response.status_code == 200:
-                result = response.json()
-                # Handle different response formats
-                if isinstance(result, list) and len(result) > 0:
-                    return result[0].get("generated_text", "").strip()
-                elif isinstance(result, dict):
-                    return result.get("generated_text", "").strip()
-                else:
-                    return str(result).strip()
-            elif response.status_code == 503:
-                # Model is loading
-                return "I need more time to think about this. The model is currently loading."
-            else:
-                # Fallback for other API issues
-                return "I don't have enough information to answer that question precisely."
-
-        except requests.exceptions.Timeout:
-            return "The model is taking too long to respond. Let me give a simpler answer instead."
-        except Exception as e:
-            # More robust fallback system with common answers
-            common_answers = {
-                "population": "The current world population is approximately 8 billion people.",
-                "capital": "I can tell you about many capitals. For example, Paris is the capital of France.",
-                "math": "I can help with mathematical calculations.",
-                "weather": "I don't have access to current weather information.",
-                "date": "I can tell you that a day has 24 hours.",
-                "time": "I can't check the current time."
-            }
-
-            # Check if any keywords match
-            for keyword, answer in common_answers.items():
-                if keyword in prompt.lower():
-                    return answer
-
-            return "I'm sorry, I couldn't process that request properly. Please try asking in a simpler way."
 
-# ---
+# --- Basic Agent Definition ---
+# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
-        print("
-        self.tools = {
-            "calculator": Tools.calculator,
-            "search": Tools.search,
-            "date": Tools.date_info
-        }
-        self.llm = LLMInterface()
-
+        print("BasicAgent initialized.")
+        # Initialize the model
+        #model = HfApiModel()
+        model = OpenAIServerModel(model_id="gpt-4o")
+        # Initialize the search tool
+        search_tool = DuckDuckGoSearchTool()
+        # Initialize the agent
+        self.agent = CodeAgent(
+            model=model,
+            tools=[search_tool]
+        )
     def __call__(self, question: str) -> str:
-        print(f"Agent received question: {question[:50]}...")
-
-        # Step 1: Analyze the question to decide which tool (if any) to use
-        tool_needed, tool_name = self._analyze_question(question)
-
-        # Step 2: Use appropriate tool or direct answer
-        if tool_needed:
-            if tool_name == "calculator":
-                # Extract the math expression from the question
-                expression = self._extract_math_expression(question)
-                if expression:
-                    result = self.tools["calculator"](expression)
-                    # Format numerical answers appropriately
-                    if isinstance(result, (int, float)):
-                        if result == int(result):
-                            answer = str(int(result))  # Remove decimal for whole numbers
-                        else:
-                            answer = str(result)  # Keep decimal for fractions
-                    else:
-                        answer = str(result)
-                else:
-                    answer = "Unable to extract a mathematical expression from the question."
-
-            elif tool_name == "search":
-                result = self.tools["search"](question)
-                answer = self._extract_direct_answer(question, result)
-
-            elif tool_name == "date":
-                result = self.tools["date"]()
-                answer = result
-
-            else:
-                # Use LLM for other types of questions
-                answer = self._get_answer_from_llm(question)
-        else:
-            # Direct answer for simpler questions
-            answer = self._get_answer_from_llm(question)
-
-        print(f"Agent returning answer: {answer[:50]}...")
-        return answer
-
-    def _analyze_question(self, question: str) -> tuple:
-        """Determine if the question requires a tool and which one"""
-        # Check for mathematical questions
-        math_patterns = [
-            r'calculate', r'compute', r'what is \d+', r'how much is',
-            r'sum of', r'multiply', r'divide', r'subtract', r'plus', r'minus',
-            r'\d+\s*[\+\-\*\/\%]\s*\d+', r'squared', r'cubed', r'square root'
-        ]
-
-        for pattern in math_patterns:
-            if re.search(pattern, question.lower()):
-                return True, "calculator"
-
-        # Check for factual questions that might need search
-        search_patterns = [
-            r'^what is', r'^who is', r'^where is', r'^when', r'^how many',
-            r'capital of', r'largest', r'tallest', r'population', r'president',
-            r'temperature', r'boiling point', r'freezing point', r'speed of'
-        ]
-
-        for pattern in search_patterns:
-            if re.search(pattern, question.lower()):
-                return True, "search"
-
-        # Check for date-related questions
-        date_patterns = [r'what day is today', r'current date', r'today\'s date']
-
-        for pattern in date_patterns:
-            if re.search(pattern, question.lower()):
-                return True, "date"
-
-        # Default to direct answer
-        return False, None
-
-    def _extract_math_expression(self, question: str) -> str:
-        """Extract a mathematical expression from the question"""
-        # Look for common pattern: "Calculate X" or "What is X"
-        patterns = [
-            r'calculate\s+(.*?)(?:\?|$)',
-            r'what is\s+(.*?)(?:\?|$)',
-            r'compute\s+(.*?)(?:\?|$)',
-            r'find\s+(.*?)(?:\?|$)',
-            r'how much is\s+(.*?)(?:\?|$)'
-        ]
-
-        for pattern in patterns:
-            match = re.search(pattern, question.lower())
-            if match:
-                expression = match.group(1).strip()
-                # Further clean the expression
-                expression = re.sub(r'[^0-9+\-*/().%\s]', '', expression)
-                return expression
-
-        # If no clear pattern, attempt to extract any mathematical operation
-        nums_and_ops = re.findall(r'(\d+(?:\.\d+)?|\+|\-|\*|\/|\(|\)|\%)', question)
-        if nums_and_ops:
-            return ''.join(nums_and_ops)
-
-        return ""
-
-    def _extract_direct_answer(self, question: str, search_result: str) -> str:
-        """Extract a concise answer from search results based on the question"""
-        # For simple factual questions, return the search result directly
-        return search_result
-
-    def _get_answer_from_llm(self, question: str) -> str:
-        """Get an answer from the LLM with appropriate prompting"""
-        prompt = f"""
-        Answer the following question with a very concise, direct response:
-
-        Question: {question}
-
-        Answer in 1-2 sentences maximum, focusing only on the specific information requested.
-        """
-
-        # Expanded common answers to reduce LLM API dependence
-        common_answers = {
-            "what color is the sky": "Blue.",
-            "how many days in a week": "7 days.",
-            "how many months in a year": "12 months.",
-            "what is the capital of france": "Paris.",
-            "what is the capital of japan": "Tokyo.",
-            "what is the capital of italy": "Rome.",
-            "what is the capital of germany": "Berlin.",
-            "what is the capital of spain": "Madrid.",
-            "what is the capital of united states": "Washington, D.C.",
-            "what is the capital of china": "Beijing.",
-            "what is the capital of russia": "Moscow.",
-            "what is the capital of canada": "Ottawa.",
-            "what is the capital of australia": "Canberra.",
-            "what is the capital of brazil": "Brasília.",
-            "what is water made of": "H2O (hydrogen and oxygen).",
-            "who wrote romeo and juliet": "William Shakespeare.",
-            "who painted the mona lisa": "Leonardo da Vinci.",
-            "what is the largest ocean": "The Pacific Ocean.",
-            "what is the smallest planet": "Mercury.",
-            "what is the largest planet": "Jupiter.",
-            "who invented electricity": "Electricity wasn't invented but discovered through contributions from many scientists including Benjamin Franklin, Michael Faraday, and Thomas Edison.",
-            "how many continents are there": "There are 7 continents: Africa, Antarctica, Asia, Europe, North America, Australia/Oceania, and South America.",
-            "what is the largest country": "Russia is the largest country by land area.",
-            "what is the most spoken language": "Mandarin Chinese is the most spoken native language in the world.",
-            "what is the tallest mountain": "Mount Everest is the tallest mountain above sea level at 8,848.86 meters."
-        }
-
-        # Clean up the question for better matching
-        clean_question = question.lower().strip('?').strip()
-
-        # Check if we have a hardcoded answer
-        if clean_question in common_answers:
-            return common_answers[clean_question]
-
-        # Try partial matching for more flexibility
-        for key, answer in common_answers.items():
-            if clean_question in key or key in clean_question:
-                # Only return if it's a close match
-                if len(clean_question) > len(key) * 0.7 or len(key) > len(clean_question) * 0.7:
-                    return answer
-
-        # If no hardcoded answer, use the LLM
-        return self.llm.query_llm(prompt)
+        print(f"Agent received question (first 50 chars): {question[:50]}...")
+        fixed_answer = self.agent.run(question)
+        print(f"Agent returning fixed answer: {fixed_answer}")
+        return fixed_answer
 
-def run_and_submit_all(profile: gr.OAuthProfile | None):
+def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
@@ -317,14 +49,13 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent (
+    # 1. Instantiate Agent (modify this part to create your agent)
     try:
         agent = BasicAgent()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
-
-    # In the case of an app running as a hugging Face space, this link points toward your codebase
+    # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)
 
@@ -422,7 +153,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
-    gr.Markdown("#
+    gr.Markdown("# Basic Agent Evaluation Runner")
     gr.Markdown(
         """
        **Instructions:**
@@ -433,6 +164,7 @@ with gr.Blocks() as demo:
        **Disclaimers:**
        Once you click the "submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to deal with the long-running submit step, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+       Please note that this version requires an OpenAI API key to run.
        """
     )
 
@@ -441,6 +173,7 @@ with gr.Blocks() as demo:
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+    # Removed max_rows=10 from DataFrame constructor
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
     run_button.click(
@@ -469,5 +202,5 @@ if __name__ == "__main__":
 
     print("-"*(60 + len(" App Starting ")) + "\n")
 
-    print("Launching Gradio Interface for
+    print("Launching Gradio Interface for Basic Agent Evaluation...")
     demo.launch(debug=True, share=False)
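For reference, the new BasicAgent wires an OpenAIServerModel ("gpt-4o") and a DuckDuckGoSearchTool into a smolagents CodeAgent and delegates every question to agent.run(). Below is a minimal local sketch of the same wiring; it is not part of the commit, and it assumes smolagents is installed with its OpenAI and DuckDuckGo extras and that OPENAI_API_KEY is set in the environment (the sample question is made up).

import os

from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel

def build_agent() -> CodeAgent:
    # Same wiring as the new BasicAgent.__init__: an OpenAI-served model
    # plus a single web-search tool, wrapped in a CodeAgent.
    model = OpenAIServerModel(
        model_id="gpt-4o",
        api_key=os.environ["OPENAI_API_KEY"],  # assumed to be set in the environment
    )
    return CodeAgent(model=model, tools=[DuckDuckGoSearchTool()])

if __name__ == "__main__":
    agent = build_agent()
    print(agent.run("What is the capital of France?"))  # made-up smoke-test question

Passing api_key explicitly is optional; OpenAIServerModel should also be able to pick the key up from the environment, which is how a Space secret would typically be consumed.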
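The disclaimer in the Gradio description suggests caching the answers and submitting them in a separate action (or answering the questions asynchronously) to avoid the long-running submit step. The sketch below illustrates only the caching half, under stated assumptions: the cache file name, the task_id/question field names, and the helper functions are illustrative and are not taken from this Space's code.

import json
import os

CACHE_PATH = "answers_cache.json"  # hypothetical local cache file

def load_cache() -> dict:
    # Return previously computed answers, keyed by task id.
    if os.path.exists(CACHE_PATH):
        with open(CACHE_PATH) as f:
            return json.load(f)
    return {}

def answer_all(agent, questions: list[dict]) -> dict:
    # Run the agent only on questions that are not cached yet, persisting
    # after every answer so a separate submit step can reuse the results.
    cache = load_cache()
    for item in questions:
        task_id = item["task_id"]  # assumed field name
        if task_id not in cache:
            cache[task_id] = agent(item["question"])  # assumed field name
            with open(CACHE_PATH, "w") as f:
                json.dump(cache, f)
    return cache

The asynchronous variant mentioned in the disclaimer could build on the same idea, for example by wrapping each agent call in asyncio.to_thread and gathering the results before a single submission.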