Update agent.py
agent.py
CHANGED
@@ -8,24 +8,44 @@ class GaiaAgent:
    def __init__(self):
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.instructions = (
-            "You are solving GAIA benchmark questions. "
-            "If
-            "
+            "You are a reasoning assistant solving GAIA benchmark questions. "
+            "If data is provided, analyze it logically and extract the relevant facts. "
+            "Think step by step. Output only the final answer."
        )
        self.api_url = "https://agents-course-unit4-scoring.hf.space"

-    def
+    def analyze_csv(self, csv_text: str, question: str) -> str:
        try:
            df = pd.read_csv(io.StringIO(csv_text))
-
-
-
-
-
+            question_lower = question.lower()
+
+            if any(k in question_lower for k in ["lowest", "cheapest", "minimum"]):
+                col = self._detect_column(df, ["price", "cost", "amount"])
+                if col:
+                    row = df.sort_values(by=col).iloc[0].to_dict()
+                    return f"Lowest {col}: {row}"
+            elif any(k in question_lower for k in ["highest", "most expensive", "maximum"]):
+                col = self._detect_column(df, ["price", "score", "rating"])
+                if col:
+                    row = df.sort_values(by=col, ascending=False).iloc[0].to_dict()
+                    return f"Highest {col}: {row}"
+            elif "how many" in question_lower:
+                return f"Total rows: {len(df)}"
+            # fallback
+            sample = df.iloc[0].to_dict()
+            return f"Sample row: {sample}"
+
        except Exception as e:
-            return f"[
+            return f"[CSV parsing failed: {e}]"
+
+    def _detect_column(self, df, candidates):
+        for col in df.columns:
+            for name in candidates:
+                if name in col.lower():
+                    return col
+        return None

-    def fetch_file_context(self, task_id: str) -> str:
+    def fetch_file_context(self, task_id: str, question: str) -> str:
        try:
            url = f"{self.api_url}/files/{task_id}"
            response = requests.get(url, timeout=10)
@@ -33,26 +53,26 @@
            content_type = response.headers.get("Content-Type", "")

            if "csv" in content_type or url.endswith(".csv"):
-                return self.
+                return self.analyze_csv(response.text, question)
            elif "json" in content_type:
-                return f"JSON
+                return f"JSON Preview: {response.text[:1000]}"
            elif "text/plain" in content_type:
                return f"Text Sample: {response.text[:1000]}"
            elif "pdf" in content_type:
-                return "[PDF detected. OCR not supported
+                return "[PDF detected. OCR not supported.]"
            else:
                return f"[Unsupported file type: {content_type}]"

        except Exception as e:
-            return f"[
+            return f"[Error fetching file: {e}]"

    def __call__(self, question: str, task_id: str = None) -> str:
-
+        file_fact = ""
        if task_id:
-
-
+            file_fact = self.fetch_file_context(task_id, question)
+            file_fact = f"FILE INSIGHTS:\n{file_fact}\n"

-        prompt = f"{self.instructions}\n\n{
+        prompt = f"{self.instructions}\n\n{file_fact}QUESTION: {question}\nANSWER:"

        response = self.client.chat.completions.create(
            model="gpt-4-turbo",