Update agent.py
Browse files
agent.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
#
|
2 |
import os
|
3 |
import re
|
4 |
import io
|
@@ -25,17 +25,10 @@ class GaiaAgent:
|
|
25 |
return None, None
|
26 |
|
27 |
def get_step_by_step_plan(self, question):
|
28 |
-
steps_prompt = f"""
|
29 |
-
You are an expert planner. Break down the question into a clear plan with 2–5 steps.
|
30 |
-
|
31 |
-
Question: {question}
|
32 |
-
|
33 |
-
Steps:
|
34 |
-
"""
|
35 |
try:
|
36 |
response = self.client.chat.completions.create(
|
37 |
model="gpt-4-turbo",
|
38 |
-
messages=[{"role": "user", "content":
|
39 |
temperature=0,
|
40 |
timeout=15
|
41 |
)
|
@@ -44,76 +37,84 @@ Steps:
|
|
44 |
return "Step 1: Try to understand the question."
|
45 |
|
46 |
def search_with_steps(self, question, steps):
|
47 |
-
combined_prompt = f"""
|
48 |
-
You are a knowledgeable assistant. Given the following plan:
|
49 |
-
|
50 |
-
{steps}
|
51 |
-
|
52 |
-
Answer the original question using verified and precise information.
|
53 |
-
Return only the final answer, nothing else.
|
54 |
-
|
55 |
-
Question: {question}
|
56 |
-
"""
|
57 |
try:
|
58 |
-
web_context = self.search_tool.run(question)[:2000]
|
59 |
response = self.client.chat.completions.create(
|
60 |
model="gpt-4-turbo",
|
61 |
messages=[
|
62 |
-
{"role": "system", "content": f"Use only this
|
63 |
-
{"role": "user", "content":
|
64 |
],
|
65 |
temperature=0,
|
66 |
timeout=30
|
67 |
)
|
68 |
return response.choices[0].message.content.strip()
|
69 |
except:
|
70 |
-
return ""
|
71 |
|
72 |
def handle_file(self, content, ctype, question):
|
73 |
if not content:
|
74 |
-
return ""
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
|
|
|
|
93 |
df = pd.read_excel(io.BytesIO(content), engine="openpyxl")
|
94 |
df.columns = [c.strip().lower() for c in df.columns]
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
return "
|
101 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
102 |
|
103 |
def format_answer(self, raw, question):
|
|
|
|
|
104 |
raw = raw.strip().strip("\"'")
|
105 |
q = question.lower()
|
|
|
|
|
106 |
if "algebraic notation" in q:
|
107 |
match = re.search(r"[KQBNR]?[a-h]?[1-8]?x?[a-h][1-8][+#]?", raw)
|
108 |
-
return match.group(0) if match else raw
|
109 |
if "award number" in q:
|
110 |
match = re.search(r"80NSSC[0-9A-Z]+", raw)
|
111 |
return match.group(0) if match else raw
|
112 |
if "usd" in q:
|
113 |
-
m = re.search(r"
|
114 |
return f"${m.group()}" if m else "$0.00"
|
115 |
if "first name" in q:
|
116 |
-
return raw.split()[0]
|
117 |
try:
|
118 |
return str(w2n.word_to_num(raw))
|
119 |
except:
|
@@ -121,12 +122,13 @@ Question: {question}
|
|
121 |
return m.group(0) if m else raw
|
122 |
|
123 |
def __call__(self, question, task_id=None):
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
|
|
|
|
|
|
128 |
return self.format_answer(context, question)
|
129 |
-
|
130 |
-
|
131 |
-
raw = self.search_with_steps(question, steps)
|
132 |
-
return self.format_answer(raw, question)
|
|
|
1 |
+
# agent_v42.py — Wersja stabilna z obsługą błędów, filtrowaniem i lepszym parsingiem
|
2 |
import os
|
3 |
import re
|
4 |
import io
|
|
|
25 |
return None, None
|
26 |
|
27 |
def get_step_by_step_plan(self, question):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
try:
|
29 |
response = self.client.chat.completions.create(
|
30 |
model="gpt-4-turbo",
|
31 |
+
messages=[{"role": "user", "content": f"Break down this question into 2–5 logical steps:\n{question}"}],
|
32 |
temperature=0,
|
33 |
timeout=15
|
34 |
)
|
|
|
37 |
return "Step 1: Try to understand the question."
|
38 |
|
39 |
def search_with_steps(self, question, steps):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
try:
|
41 |
+
web_context = self.search_tool.run(question)[:2000] or "No useful info found."
|
42 |
response = self.client.chat.completions.create(
|
43 |
model="gpt-4-turbo",
|
44 |
messages=[
|
45 |
+
{"role": "system", "content": f"Use only this data:\n{web_context}"},
|
46 |
+
{"role": "user", "content": f"Follow this plan:\n{steps}\n\nAnswer this question concisely:\n{question}"}
|
47 |
],
|
48 |
temperature=0,
|
49 |
timeout=30
|
50 |
)
|
51 |
return response.choices[0].message.content.strip()
|
52 |
except:
|
53 |
+
return "[ERROR: step execution failed]"
|
54 |
|
55 |
def handle_file(self, content, ctype, question):
|
56 |
if not content:
|
57 |
+
return "[NO FILE DATA]"
|
58 |
+
try:
|
59 |
+
if "image" in ctype:
|
60 |
+
b64 = base64.b64encode(content).decode("utf-8")
|
61 |
+
result = self.client.chat.completions.create(
|
62 |
+
model="gpt-4o",
|
63 |
+
messages=[
|
64 |
+
{"role": "system", "content": "You're a chess assistant. Respond with only the best move in algebraic notation."},
|
65 |
+
{"role": "user", "content": [
|
66 |
+
{"type": "text", "text": question},
|
67 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}}
|
68 |
+
]}
|
69 |
+
]
|
70 |
+
)
|
71 |
+
return result.choices[0].message.content.strip()
|
72 |
+
if "audio" in ctype:
|
73 |
+
with open("/tmp/audio.mp3", "wb") as f:
|
74 |
+
f.write(content)
|
75 |
+
result = self.client.audio.transcriptions.create(model="whisper-1", file=open("/tmp/audio.mp3", "rb"))
|
76 |
+
return result.text[:2000]
|
77 |
+
if "excel" in ctype:
|
78 |
df = pd.read_excel(io.BytesIO(content), engine="openpyxl")
|
79 |
df.columns = [c.strip().lower() for c in df.columns]
|
80 |
+
if 'category' in df.columns and 'sales' in df.columns:
|
81 |
+
df = df.dropna(subset=['category', 'sales'])
|
82 |
+
df = df[df['category'].str.lower().str.strip() == 'food']
|
83 |
+
df['sales'] = pd.to_numeric(df['sales'], errors='coerce')
|
84 |
+
return f"${df['sales'].sum():.2f}"
|
85 |
+
return "[NO FOOD SALES DATA]"
|
86 |
+
return content.decode("utf-8", errors="ignore")[:3000]
|
87 |
+
except Exception as e:
|
88 |
+
return f"[FILE ERROR: {e}]"
|
89 |
+
|
90 |
+
def extract_ingredients(self, text):
|
91 |
+
try:
|
92 |
+
items = re.findall(r"[a-zA-Z]+(?:\s[a-zA-Z]+)*", text.lower())
|
93 |
+
blacklist = {'add', 'mix', 'cook', 'remove', 'combine', 'heat', 'stir', 'dash', 'before', 'cool', 'saucepan', 'until', 'mixture'}
|
94 |
+
filtered = [w for w in items if w not in blacklist and len(w.split()) <= 3]
|
95 |
+
unique = sorted(set(filtered))
|
96 |
+
return ", ".join(unique[:15])
|
97 |
+
except:
|
98 |
+
return text[:200]
|
99 |
|
100 |
def format_answer(self, raw, question):
|
101 |
+
if not raw:
|
102 |
+
return "[NO ANSWER]"
|
103 |
raw = raw.strip().strip("\"'")
|
104 |
q = question.lower()
|
105 |
+
if "ingredients" in q:
|
106 |
+
return self.extract_ingredients(raw)
|
107 |
if "algebraic notation" in q:
|
108 |
match = re.search(r"[KQBNR]?[a-h]?[1-8]?x?[a-h][1-8][+#]?", raw)
|
109 |
+
return match.group(0) if match else raw[:30]
|
110 |
if "award number" in q:
|
111 |
match = re.search(r"80NSSC[0-9A-Z]+", raw)
|
112 |
return match.group(0) if match else raw
|
113 |
if "usd" in q:
|
114 |
+
m = re.search(r"\$?\d+(\.\d{2})", raw)
|
115 |
return f"${m.group()}" if m else "$0.00"
|
116 |
if "first name" in q:
|
117 |
+
return raw.split()[0] if " " in raw else raw
|
118 |
try:
|
119 |
return str(w2n.word_to_num(raw))
|
120 |
except:
|
|
|
122 |
return m.group(0) if m else raw
|
123 |
|
124 |
def __call__(self, question, task_id=None):
|
125 |
+
try:
|
126 |
+
file, ctype = self.fetch_file(task_id) if task_id else (None, None)
|
127 |
+
if file:
|
128 |
+
context = self.handle_file(file, ctype, question)
|
129 |
+
else:
|
130 |
+
steps = self.get_step_by_step_plan(question)
|
131 |
+
context = self.search_with_steps(question, steps)
|
132 |
return self.format_answer(context, question)
|
133 |
+
except Exception as e:
|
134 |
+
return f"[AGENT ERROR: {e}]"
|
|
|
|