Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
"""
|
2 |
-
|
3 |
"""
|
4 |
|
5 |
import os
|
@@ -9,38 +9,70 @@ import pandas as pd
|
|
9 |
import json
|
10 |
import re
|
11 |
from typing import List, Dict, Any, Optional, Callable, Union
|
|
|
12 |
|
13 |
# --- Constants ---
|
14 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
|
15 |
|
16 |
-
class
|
17 |
"""
|
18 |
-
|
19 |
-
|
20 |
"""
|
21 |
|
22 |
-
def __init__(self):
|
23 |
-
"""Initialize the agent with
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
31 |
|
32 |
def __call__(self, question: str) -> str:
|
33 |
-
"""Process a question and return an
|
34 |
print(f"Processing question: {question}")
|
35 |
|
36 |
-
#
|
37 |
-
|
|
|
38 |
|
39 |
-
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
-
def
|
43 |
-
"""
|
44 |
question_lower = question.lower()
|
45 |
|
46 |
# Check for calculation questions
|
@@ -48,64 +80,42 @@ class GAIAAgent:
|
|
48 |
"calculate", "compute", "sum", "difference",
|
49 |
"product", "divide", "plus", "minus", "times"
|
50 |
]):
|
51 |
-
return
|
52 |
|
53 |
# Check for image analysis questions
|
54 |
elif any(keyword in question_lower for keyword in [
|
55 |
"image", "picture", "photo", "graph", "chart", "diagram"
|
56 |
]):
|
57 |
-
return
|
58 |
|
59 |
-
# Check for factual questions
|
60 |
elif any(keyword in question_lower for keyword in [
|
61 |
"who", "what", "where", "when", "why", "how"
|
62 |
]):
|
63 |
-
return
|
64 |
|
65 |
-
# Default
|
66 |
else:
|
67 |
-
return
|
68 |
|
69 |
-
def
|
70 |
-
"""
|
71 |
-
|
|
|
|
|
|
|
72 |
|
73 |
-
#
|
74 |
-
|
|
|
75 |
|
76 |
-
|
77 |
-
# Determine operation type
|
78 |
-
if any(op in question_lower for op in ["sum", "add", "plus", "+"]):
|
79 |
-
result = sum(int(num) for num in numbers)
|
80 |
-
return f"The sum of the numbers is {result}"
|
81 |
-
|
82 |
-
elif any(op in question_lower for op in ["difference", "subtract", "minus", "-"]):
|
83 |
-
result = int(numbers[0]) - int(numbers[1])
|
84 |
-
return f"The difference between {numbers[0]} and {numbers[1]} is {result}"
|
85 |
-
|
86 |
-
elif any(op in question_lower for op in ["product", "multiply", "times", "*"]):
|
87 |
-
result = int(numbers[0]) * int(numbers[1])
|
88 |
-
return f"The product of {numbers[0]} and {numbers[1]} is {result}"
|
89 |
-
|
90 |
-
elif any(op in question_lower for op in ["divide", "division", "/"]):
|
91 |
-
if int(numbers[1]) != 0:
|
92 |
-
result = int(numbers[0]) / int(numbers[1])
|
93 |
-
return f"The result of dividing {numbers[0]} by {numbers[1]} is {result}"
|
94 |
-
else:
|
95 |
-
return "Cannot divide by zero"
|
96 |
-
|
97 |
-
# If we couldn't parse the calculation specifically
|
98 |
-
return "I'll calculate this for you: " + question
|
99 |
-
|
100 |
-
def _handle_image_analysis(self, question: str) -> str:
|
101 |
-
"""Handle questions about images or visual content."""
|
102 |
-
return "Based on the image, I can see several key elements that help answer your question. The main subject appears to be [description] which indicates [answer]."
|
103 |
|
104 |
-
def
|
105 |
-
"""
|
106 |
question_lower = question.lower()
|
107 |
|
108 |
-
# Map question words to appropriate responses
|
109 |
if "who" in question_lower:
|
110 |
return "The person involved is a notable figure in this field with significant contributions and achievements."
|
111 |
elif "when" in question_lower:
|
@@ -120,11 +130,7 @@ class GAIAAgent:
|
|
120 |
return "The process involves several key steps that must be followed in sequence to achieve the desired outcome."
|
121 |
|
122 |
# Fallback for other question types
|
123 |
-
return "
|
124 |
-
|
125 |
-
def _handle_general_knowledge(self, question: str) -> str:
|
126 |
-
"""Handle general knowledge questions that don't fit other categories."""
|
127 |
-
return "Based on my analysis, the answer to your question involves several important factors. First, we need to consider the context and specific details mentioned. Taking all available information into account, the most accurate response would be a comprehensive explanation that addresses all aspects of your query."
|
128 |
|
129 |
|
130 |
class EvaluationRunner:
|
@@ -294,7 +300,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, *args):
|
|
294 |
|
295 |
# Initialize agent and evaluation runner
|
296 |
try:
|
297 |
-
agent
|
|
|
298 |
runner = EvaluationRunner()
|
299 |
except Exception as e:
|
300 |
error_msg = f"Error initializing agent or evaluation runner: {e}"
|
@@ -307,7 +314,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, *args):
|
|
307 |
|
308 |
# --- Gradio Interface ---
|
309 |
with gr.Blocks() as demo:
|
310 |
-
gr.Markdown("# GAIA Agent Evaluation Runner")
|
311 |
|
312 |
gr.Markdown("## Instructions:")
|
313 |
gr.Markdown("1. Log in to your Hugging Face account using the button below.")
|
@@ -316,7 +323,7 @@ with gr.Blocks() as demo:
|
|
316 |
|
317 |
gr.Markdown("---")
|
318 |
|
319 |
-
gr.Markdown("**Note:** The evaluation process may take
|
320 |
|
321 |
with gr.Row():
|
322 |
login_button = gr.LoginButton(value="Sign in with Hugging Face")
|
|
|
1 |
"""
|
2 |
+
Improved GAIA Agent with LLM Integration for Hugging Face Course
|
3 |
"""
|
4 |
|
5 |
import os
|
|
|
9 |
import json
|
10 |
import re
|
11 |
from typing import List, Dict, Any, Optional, Callable, Union
|
12 |
+
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
13 |
|
14 |
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
DEFAULT_MODEL = "google/flan-t5-small"  # Smaller model for faster loading


class LLMGAIAAgent:
    """
    An improved GAIA agent that uses a language model to generate responses
    instead of template-based answers.

    If the model cannot be loaded (e.g. offline, missing weights), the agent
    degrades gracefully and serves template-based fallback responses instead
    of crashing the app.
    """

    def __init__(self, model_name=DEFAULT_MODEL):
        """Initialize the agent with a language model.

        Args:
            model_name: Hugging Face model id of a seq2seq model to load.
                Defaults to ``DEFAULT_MODEL``.
        """
        print(f"Initializing LLMGAIAAgent with model: {model_name}")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
            self.model_name = model_name
            print(f"Successfully loaded model: {model_name}")
        except Exception as e:
            # Model download/load can fail for many reasons (no network, bad
            # id, out of memory); fall back to template answers so the Gradio
            # app still starts.
            print(f"Error loading model: {e}")
            print("Falling back to template-based responses")
            self.model = None
            self.tokenizer = None
            self.model_name = None

    def __call__(self, question: str) -> str:
        """Process a question and return an answer using the language model.

        Falls back to ``self._fallback_response`` when the model is not
        available or generation raises.
        """
        print(f"Processing question: {question}")

        # Check if model is available
        if self.model is None or self.tokenizer is None:
            return self._fallback_response(question)

        try:
            # Prepare prompt based on question type
            prompt = self._prepare_prompt(question)

            # Generate response using the model
            inputs = self.tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
            outputs = self.model.generate(
                inputs["input_ids"],
                # Fix: pass the attention mask explicitly. Calling generate()
                # with input_ids alone makes transformers infer the mask (and
                # warn); being explicit keeps behavior well-defined.
                attention_mask=inputs["attention_mask"],
                max_length=150,
                min_length=20,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                num_return_sequences=1
            )

            # Decode the response
            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Clean up the response if needed
            response = self._clean_response(response)

            return response
        except Exception as e:
            print(f"Error generating response: {e}")
            return self._fallback_response(question)

    def _prepare_prompt(self, question: str) -> str:
        """Prepare an appropriate prompt based on the question type.

        Categories are matched in order: calculation, image analysis,
        factual (wh-words), then a general-knowledge default.
        """
        question_lower = question.lower()

        # Check for calculation questions
        if any(keyword in question_lower for keyword in [
            "calculate", "compute", "sum", "difference",
            "product", "divide", "plus", "minus", "times"
        ]):
            return f"Solve this math problem step by step: {question}"

        # Check for image analysis questions
        elif any(keyword in question_lower for keyword in [
            "image", "picture", "photo", "graph", "chart", "diagram"
        ]):
            return f"Describe what might be seen in an image related to this question: {question}"

        # Check for factual questions
        elif any(keyword in question_lower for keyword in [
            "who", "what", "where", "when", "why", "how"
        ]):
            return f"Answer this factual question concisely and accurately: {question}"

        # Default prompt for general knowledge
        else:
            return f"Provide a concise, informative answer to this question: {question}"

    def _clean_response(self, response: str) -> str:
        """Clean up the model's response if needed.

        Strips common answer prefixes and replaces degenerate (too short)
        generations with a fallback response.
        """
        # Remove any prefixes like "Answer:" or "Response:"
        for prefix in ["Answer:", "Response:", "A:"]:
            if response.startswith(prefix):
                response = response[len(prefix):].strip()

        # Ensure the response is not too short
        if len(response) < 10:
            return self._fallback_response("general")

        return response

    # NOTE(review): _fallback_response(question) is defined further down in
    # the file and is relied upon by __call__ and _clean_response above.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
113 |
|
114 |
+
def _fallback_response(self, question: str) -> str:
|
115 |
+
"""Provide a fallback response if the model fails."""
|
116 |
question_lower = question.lower()
|
117 |
|
118 |
+
# Map question words to appropriate responses (similar to original GAIAAgent)
|
119 |
if "who" in question_lower:
|
120 |
return "The person involved is a notable figure in this field with significant contributions and achievements."
|
121 |
elif "when" in question_lower:
|
|
|
130 |
return "The process involves several key steps that must be followed in sequence to achieve the desired outcome."
|
131 |
|
132 |
# Fallback for other question types
|
133 |
+
return "Based on my analysis, the answer to your question involves several important factors. First, we need to consider the context and specific details mentioned."
|
|
|
|
|
|
|
|
|
134 |
|
135 |
|
136 |
class EvaluationRunner:
|
|
|
300 |
|
301 |
# Initialize agent and evaluation runner
|
302 |
try:
|
303 |
+
# Use the LLM-based agent instead of the template-based one
|
304 |
+
agent = LLMGAIAAgent()
|
305 |
runner = EvaluationRunner()
|
306 |
except Exception as e:
|
307 |
error_msg = f"Error initializing agent or evaluation runner: {e}"
|
|
|
314 |
|
315 |
# --- Gradio Interface ---
|
316 |
with gr.Blocks() as demo:
|
317 |
+
gr.Markdown("# GAIA Agent Evaluation Runner (LLM-Enhanced)")
|
318 |
|
319 |
gr.Markdown("## Instructions:")
|
320 |
gr.Markdown("1. Log in to your Hugging Face account using the button below.")
|
|
|
323 |
|
324 |
gr.Markdown("---")
|
325 |
|
326 |
+
gr.Markdown("**Note:** This version uses a language model to generate responses. The evaluation process may take longer than the template-based version.")
|
327 |
|
328 |
with gr.Row():
|
329 |
login_button = gr.LoginButton(value="Sign in with Hugging Face")
|