import os
import sys
import json
import tempfile
from typing import List, Dict, Any, Optional
import traceback
# import dotenv
# Load environment variables from .env file
# dotenv.load_dotenv()
# Import our agent
from agent import QAgent
# Simulation of GAIA benchmark questions
SAMPLE_QUESTIONS = [
    {
        "task_id": "task_001",
        "question": "What is the capital of France?",
        "expected_answer": "Paris",
        "has_file": False,
        "file_content": None
    }
]

SAMPLE_QUESTIONS_OUT = [
    {
        "task_id": "task_002",
        "question": "What is the square root of 144?",
        "expected_answer": "12",
        "has_file": False,
        "file_content": None
    },
    {
        "task_id": "task_003",
        "question": "If a train travels at 60 miles per hour, how far will it travel in 2.5 hours?",
        "expected_answer": "150 miles",
        "has_file": False,
        "file_content": None
    },
    {
        "task_id": "task_004",
        "question": ".rewsna eht sa 'thgir' drow eht etirw ,tfel fo etisoppo eht si tahW",
        "expected_answer": "right",
        "has_file": False,
        "file_content": None
    },
    {
        "task_id": "task_005",
        "question": "Analyze the data in the attached CSV file and tell me the total sales for the month of January.",
        "expected_answer": "$10,250.75",
        "has_file": True,
        "file_content": """Date,Product,Quantity,Price,Total
2023-01-05,Widget A,10,25.99,259.90
2023-01-12,Widget B,5,45.50,227.50
2023-01-15,Widget C,20,50.25,1005.00
2023-01-20,Widget A,15,25.99,389.85
2023-01-25,Widget B,8,45.50,364.00
2023-01-28,Widget D,100,80.04,8004.50"""
    },
    {
        "task_id": "task_006",
        "question": "I'm making a grocery list for my mom, but she's a picky eater. She only eats foods that don't contain the letter 'e'. List 5 common fruits and vegetables she can eat.",
        "expected_answer": "Banana, Kiwi, Corn, Fig, Taro",
        "has_file": False,
        "file_content": None
    },
    {
        "task_id": "task_007",
        "question": "How many studio albums were published by Mercedes Sosa between 1972 and 1985?",
        "expected_answer": "12",
        "has_file": False,
        "file_content": None
    },
    {
        "task_id": "task_008",
        "question": "In the video https://www.youtube.com/watch?v=L1vXC1KMRd0, what color is primarily associated with the main character?",
        "expected_answer": "Blue",
        "has_file": False,
        "file_content": None
    }
]


def save_test_file(task_id: str, content: str) -> str:
    """Save a test file to a temporary location."""
    temp_dir = tempfile.gettempdir()
    file_path = os.path.join(temp_dir, f"test_file_{task_id}.csv")
    with open(file_path, 'w') as f:
        f.write(content)
    return file_path


def run_GAIA_questions_simu():
    """
    Used only during development for tests that simulate GAIA questions.
    """
    # 1. Instantiate agent
    try:
        agent = QAgent()
    except Exception as e:
        print(f"Error instantiating agent for GAIA simulation: {e}")
        return f"Error initializing agent for GAIA simulation: {e}", None

    results = []
    correct_count = 0
    total_count = len(SAMPLE_QUESTIONS)

    for idx, question_data in enumerate(SAMPLE_QUESTIONS):
        task_id = question_data["task_id"]
        question = question_data["question"]
        expected = question_data["expected_answer"]

        print(f"\n{'='*80}")
        print(f"Question {idx+1}/{total_count}: {question}")
        print(f"Expected: {expected}")

        # Process any attached file
        # file_path = None
        # if question_data["has_file"] and question_data["file_content"]:
        #     file_path = save_test_file(task_id, question_data["file_content"])
        #     print(f"Created test file: {file_path}")

        # Get answer from agent
        try:
            answer = agent.invoke(question)  # , file_path)
            print(f"Agent answer: {answer}")

            # Check if answer matches expected
            is_correct = answer.lower() == expected.lower()
            if is_correct:
                correct_count += 1
                print("✅ CORRECT")
            else:
                print(f"❌ INCORRECT - Expected: {expected}")

            results.append({
                "task_id": task_id,
                "question": question,
                "expected": expected,
                "answer": answer,
                "is_correct": is_correct
            })
        except Exception as e:
            error_details = traceback.format_exc()
            print(f"Error processing question: {e}\n{error_details}")
            results.append({
                "task_id": task_id,
                "question": question,
                "expected": expected,
                "answer": f"ERROR: {str(e)}",
                "is_correct": False
            })

    # Print summary
    accuracy = (correct_count / total_count) * 100
    print(f"\n{'='*80}")
    print(f"Test Results: {correct_count}/{total_count} correct ({accuracy:.1f}%)")
    return results
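

# Minimal entry point, assuming this script is meant to be run directly during
# development. The captured source does not show how run_GAIA_questions_simu()
# is invoked, so this guard is an illustrative sketch rather than part of the
# original file.
if __name__ == "__main__":
    run_GAIA_questions_simu()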