from __future__ import annotations

import json
import os
from typing import Any, Dict, List, Optional

from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import create_react_agent

from state import AgentState

# ─────────────────────────── External tools ──────────────────────────────
from tools import (
    wikipedia_search_tool,
    arxiv_search_tool,
    audio_transcriber_tool,
    excel_tool,
    analyze_code_tool,
)

# ─────────────────────────── Configuration ───────────────────────────────
SYSTEM_PROMPT = """
You are a general AI assistant. I will ask you a question. Don't report your
thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma
separated list of numbers and/or strings. If you are asked for a number, don't
write it with commas and don't use units such as $ or percent signs unless
specified otherwise. If you are asked for a string, don't use articles or
abbreviations (e.g. for cities), and write digits in plain text unless
specified otherwise. If you are asked for a comma separated list, apply the
above rules to each element depending on whether it is a number or a string.
"""

MAX_TOOL_CALLS = 5

# ─────────────────────────── Helper utilities ────────────────────────────

# ─────────────────────────── Agent state ⬇ ───────────────────────────────

# ───────────────────────────── Nodes ⬇ ───────────────────────────────────

# ------------- tool adapters -------------

# ─────────────────────────── Graph wiring ────────────────────────────────
def build_graph():
    """Build and return a prebuilt ReAct agent wired with the external tools."""
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
    llm_tools = [
        wikipedia_search_tool,
        arxiv_search_tool,
        audio_transcriber_tool,
        excel_tool,
        analyze_code_tool,
    ]
    # Create the ReAct agent, passing the answer-format rules as the system prompt
    agent = create_react_agent(model=llm, tools=llm_tools, prompt=SYSTEM_PROMPT)
    return agent
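
# ─────────────────────────── Usage sketch ─────────────────────────────────
# Minimal sketch of how the compiled agent can be invoked; it is not part of
# the original module. It assumes OPENAI_API_KEY is set in the environment and
# uses an inline question purely for illustration.
if __name__ == "__main__":
    graph = build_graph()
    # create_react_agent returns a compiled graph that takes a messages-based
    # state as input and returns the full message history.
    result = graph.invoke(
        {"messages": [HumanMessage(content="What is the capital of France?")]}
    )
    # The last message should end with the "FINAL ANSWER: ..." template
    # required by SYSTEM_PROMPT.
    print(result["messages"][-1].content)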