# agent.py - stable ReActAgent version with API key check and error logging

import asyncio
import os

from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.core.agent.react.base import ReActAgent
from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.document_loaders import YoutubeLoader

# Optional log check for Hugging Face Secrets
if os.getenv("OPENAI_API_KEY"):
    print("✅ Detected OPENAI_API_KEY in environment")
else:
    print("⚠️ Missing OPENAI_API_KEY - LLM calls may fail")


# Tool wrappers
def search_duckduckgo(query: str) -> str:
    """Search the web via DuckDuckGo and return the raw result text."""
    return DuckDuckGoSearchRun().run(query)


def search_wikipedia(query: str) -> str:
    """Query Wikipedia and return the matching article text."""
    return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()).run(query)


def run_python(code: str) -> str:
    """Execute Python code in a REPL tool and return its output."""
    return PythonREPLTool().run(code)


def get_youtube_transcript(url: str) -> str:
    """Load a YouTube video's transcript and return it as a single string."""
    loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
    docs = loader.load()
    return " ".join(doc.page_content for doc in docs)


TOOLS = [
    FunctionTool.from_defaults(search_duckduckgo),
    FunctionTool.from_defaults(search_wikipedia),
    FunctionTool.from_defaults(run_python),
    FunctionTool.from_defaults(get_youtube_transcript),
]

llm = OpenAI(model="gpt-4")

agent = ReActAgent.from_tools(
    tools=TOOLS,
    llm=llm,
    verbose=True,
    system_prompt="""
You are an expert AI assistant participating in the GAIA benchmark.
Your task is to answer questions as precisely and exactly as possible.
Each answer is evaluated automatically; strict formatting matters.

Rules:
1. Output ONLY the final answer, with no explanation or commentary.
2. Format exactly as requested (lists, names, numbers, chess moves, currency).
3. Use tools as needed; do not guess.
""",
)


# Safe sync wrapper
def answer_question_sync(question: str) -> str:
    try:
        response = agent.chat(question)
        # AgentChatResponse exposes the final text on .response; fall back to str()
        if hasattr(response, "response") and isinstance(response.response, str):
            return response.response.strip()
        return str(response).strip()
    except Exception as e:
        print(f"❌ Exception while answering: {e}")
        return f"[ERROR] {e}"


# Async wrapper for FastAPI/Gradio; run the blocking agent call in a worker thread
async def answer_question(question: str) -> str:
    return await asyncio.to_thread(answer_question_sync, question)
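

# --- Usage sketch ---
# Minimal manual smoke test, assuming this module is run directly with
# OPENAI_API_KEY set in the environment. The question below is an
# illustrative placeholder, not an actual GAIA benchmark item.
if __name__ == "__main__":
    print(answer_question_sync("Who wrote the novel 'Frankenstein'?"))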