Update agent.py
Browse files
agent.py
CHANGED
@@ -1,43 +1,79 @@
|
|
1 |
-
# agent.py —
|
2 |
|
3 |
-
import asyncio
|
4 |
import os
|
|
|
5 |
from llama_index.llms.openai import OpenAI
|
6 |
-
from llama_index.core.tools import FunctionTool
|
7 |
from llama_index.core.agent.react.base import ReActAgent
|
|
|
8 |
|
9 |
-
from langchain_community.tools import
|
10 |
from langchain_experimental.tools.python.tool import PythonREPLTool
|
11 |
from langchain_community.document_loaders import YoutubeLoader
|
12 |
|
13 |
-
|
|
|
|
|
|
|
14 |
if os.getenv("OPENAI_API_KEY"):
|
15 |
print("✅ Detected OPENAI_API_KEY in environment")
|
16 |
else:
|
17 |
-
print("⚠️ Missing OPENAI_API_KEY — LLM may
|
18 |
-
|
19 |
-
# Tool wrappers
|
20 |
-
def search_duckduckgo(query: str) -> str:
|
21 |
-
return DuckDuckGoSearchRun().run(query)
|
22 |
|
23 |
-
|
24 |
-
|
|
|
25 |
|
26 |
-
|
27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
|
|
|
29 |
def get_youtube_transcript(url: str) -> str:
|
30 |
-
|
31 |
-
|
32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
|
|
|
34 |
TOOLS = [
|
35 |
-
FunctionTool.from_defaults(
|
36 |
-
FunctionTool.from_defaults(
|
37 |
-
FunctionTool.from_defaults(run_python),
|
38 |
FunctionTool.from_defaults(get_youtube_transcript),
|
|
|
|
|
39 |
]
|
40 |
|
|
|
41 |
llm = OpenAI(model="gpt-4")
|
42 |
|
43 |
agent = ReActAgent.from_tools(
|
@@ -47,16 +83,22 @@ agent = ReActAgent.from_tools(
|
|
47 |
system_prompt="""
|
48 |
You are an expert AI assistant participating in the GAIA benchmark.
|
49 |
|
50 |
-
Your
|
|
|
|
|
|
|
|
|
|
|
51 |
|
52 |
Rules:
|
53 |
-
1. Output
|
54 |
-
2. Format exactly
|
55 |
-
3. Use tools
|
|
|
56 |
"""
|
57 |
)
|
58 |
|
59 |
-
#
|
60 |
def answer_question_sync(question: str) -> str:
|
61 |
try:
|
62 |
response = agent.chat(question)
|
@@ -70,6 +112,5 @@ def answer_question_sync(question: str) -> str:
|
|
70 |
print(f"❌ Exception while answering: {e}")
|
71 |
return f"[ERROR] {e}"
|
72 |
|
73 |
-
# Async wrapper for FastAPI/Gradio
|
74 |
async def answer_question(question: str) -> str:
|
75 |
return answer_question_sync(question)
|
|
|
1 |
+
# agent.py — full GAIA-ready agent with tools for web, audio, Excel, Python
|
2 |
|
|
|
3 |
import os
|
4 |
+
import asyncio
|
5 |
from llama_index.llms.openai import OpenAI
|
|
|
6 |
from llama_index.core.agent.react.base import ReActAgent
|
7 |
+
from llama_index.core.tools import FunctionTool
|
8 |
|
9 |
+
from langchain_community.tools.wikipedia.tool import WikipediaTool
|
10 |
from langchain_experimental.tools.python.tool import PythonREPLTool
|
11 |
from langchain_community.document_loaders import YoutubeLoader
|
12 |
|
13 |
+
import openai_whisper as whisper
|
14 |
+
import openpyxl
|
15 |
+
|
16 |
+
# Confirm OpenAI API key
|
17 |
if os.getenv("OPENAI_API_KEY"):
|
18 |
print("✅ Detected OPENAI_API_KEY in environment")
|
19 |
else:
|
20 |
+
print("⚠️ Missing OPENAI_API_KEY — LLM may not work")
|
|
|
|
|
|
|
|
|
21 |
|
22 |
+
# --- Web tools ---
|
23 |
+
def wikipedia_search(query: str) -> str:
|
24 |
+
return WikipediaTool().run(query)
|
25 |
|
26 |
+
# --- Python with output ---
|
27 |
+
def run_python_with_output(code: str) -> str:
|
28 |
+
try:
|
29 |
+
if "print(" not in code:
|
30 |
+
code = f"print({code})" if not code.strip().endswith("\n") else f"print({code.strip()})"
|
31 |
+
return PythonREPLTool().run(code)
|
32 |
+
except Exception as e:
|
33 |
+
return f"[PYTHON ERROR] {e}"
|
34 |
|
35 |
+
# --- YouTube (fallback placeholder) ---
|
36 |
def get_youtube_transcript(url: str) -> str:
|
37 |
+
try:
|
38 |
+
loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
|
39 |
+
docs = loader.load()
|
40 |
+
return " ".join(doc.page_content for doc in docs)
|
41 |
+
except Exception as e:
|
42 |
+
return f"[YOUTUBE ERROR] {e}"
|
43 |
+
|
44 |
+
# --- Whisper transcription ---
|
45 |
+
def transcribe_audio(file_path: str) -> str:
|
46 |
+
try:
|
47 |
+
model = whisper.load_model("base")
|
48 |
+
result = model.transcribe(file_path)
|
49 |
+
return result['text']
|
50 |
+
except Exception as e:
|
51 |
+
return f"[AUDIO ERROR] {e}"
|
52 |
+
|
53 |
+
# --- Excel sales extraction ---
|
54 |
+
def extract_excel_total_food_sales(file_path: str) -> str:
|
55 |
+
try:
|
56 |
+
wb = openpyxl.load_workbook(file_path)
|
57 |
+
sheet = wb.active
|
58 |
+
total = 0
|
59 |
+
for row in sheet.iter_rows(min_row=2, values_only=True):
|
60 |
+
category, amount = row[1], row[2]
|
61 |
+
if isinstance(category, str) and 'food' in category.lower():
|
62 |
+
total += float(amount)
|
63 |
+
return f"${total:.2f}"
|
64 |
+
except Exception as e:
|
65 |
+
return f"[EXCEL ERROR] {e}"
|
66 |
|
67 |
+
# --- Tool list ---
|
68 |
TOOLS = [
|
69 |
+
FunctionTool.from_defaults(wikipedia_search),
|
70 |
+
FunctionTool.from_defaults(run_python_with_output),
|
|
|
71 |
FunctionTool.from_defaults(get_youtube_transcript),
|
72 |
+
FunctionTool.from_defaults(transcribe_audio),
|
73 |
+
FunctionTool.from_defaults(extract_excel_total_food_sales),
|
74 |
]
|
75 |
|
76 |
+
# --- LLM and Agent ---
|
77 |
llm = OpenAI(model="gpt-4")
|
78 |
|
79 |
agent = ReActAgent.from_tools(
|
|
|
83 |
system_prompt="""
|
84 |
You are an expert AI assistant participating in the GAIA benchmark.
|
85 |
|
86 |
+
Your goal is to answer 20 diverse questions using available tools:
|
87 |
+
- Wikipedia search
|
88 |
+
- Python code runner
|
89 |
+
- YouTube transcript
|
90 |
+
- MP3 transcription (Whisper)
|
91 |
+
- Excel analysis
|
92 |
|
93 |
Rules:
|
94 |
+
1. Output only the FINAL answer. No explanations.
|
95 |
+
2. Format must match expected output exactly: comma-separated lists, plain names, numeric values, algebraic notation.
|
96 |
+
3. Use tools smartly. Don't guess when tools can help.
|
97 |
+
4. If tools fail (e.g., YouTube blocked), say clearly: "Tool not available".
|
98 |
"""
|
99 |
)
|
100 |
|
101 |
+
# --- Run function ---
|
102 |
def answer_question_sync(question: str) -> str:
|
103 |
try:
|
104 |
response = agent.chat(question)
|
|
|
112 |
print(f"❌ Exception while answering: {e}")
|
113 |
return f"[ERROR] {e}"
|
114 |
|
|
|
115 |
async def answer_question(question: str) -> str:
|
116 |
return answer_question_sync(question)
|