Update agent.py
agent.py CHANGED
@@ -13,9 +13,14 @@ from langchain_experimental.tools.python.tool import PythonAstREPLTool # for lo
 from tools import (calculator_basic, datetime_tools, transcribe_audio, transcribe_youtube, query_image, webpage_content, read_excel)
 from prompt import system_prompt
 
-
-
+from langchain_core.runnables import RunnableConfig # for LangSmith tracking
+
+# LangSmith to observe the agent
+langsmith_api_key = os.getenv("LANGSMITH_API_KEY")
+langsmith_tracing = os.getenv("LANGSMITH_TRACING")
 
+# gpt-4.1-nano (cheaper for debugging) with temperature 0 for less randomness
+# for benchmarking use o4-mini (better reasoning) or gpt-4o-mini (cheaper)
 llm = ChatOpenAI(
     model="gpt-4o-mini",
     api_key=os.getenv("OPENAI_API_KEY"),
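Note: in LangChain, LangSmith tracing is typically switched on simply by these environment variables being present in the process environment; reading them into Python variables mainly serves as a sanity check. A minimal sketch of such a check, assuming the same LANGSMITH_* variable names the commit reads (the truthiness test and error message are illustrative, not part of the commit):

    import os

    langsmith_tracing = os.getenv("LANGSMITH_TRACING")   # e.g. "true"
    langsmith_api_key = os.getenv("LANGSMITH_API_KEY")

    # Illustrative: fail early instead of silently running without traces.
    if langsmith_tracing and langsmith_tracing.lower() == "true" and not langsmith_api_key:
        raise RuntimeError("LANGSMITH_TRACING is set but LANGSMITH_API_KEY is missing")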
@@ -104,7 +109,16 @@ class LangGraphAgent:
     def __call__(self, question: str) -> str:
         input_state = {"messages": [HumanMessage(content=question)]} # prepare the initial user message
         print(f"Running LangGraphAgent with input: {question[:150]}...")
-
+
+        # tracing configuration for LangSmith
+        config = RunnableConfig(
+            config={
+                "run_name": "GAIA Agent",
+                "tags": ["gaia", "langgraph", "agent"],
+                "metadata": {"user_input": question}
+            }
+        )
+        result = gaia_agent.invoke(input_state, config, {"recursion_limit": 30}) # prevents infinite looping when the LLM keeps calling tools over and over
         final_response = result["messages"][-1].content
 
         try:
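For reference, RunnableConfig in langchain_core is a TypedDict, so its fields (run_name, tags, metadata, and also recursion_limit) are normally passed directly rather than wrapped in a config= key, and the recursion limit can travel inside that same config instead of being handed to invoke separately. A minimal sketch of the call under that reading of the API, reusing question, input_state, and gaia_agent from the code above (a sketch, not the commit's exact code):

    from langchain_core.runnables import RunnableConfig

    # Sketch only: field names follow langchain_core's RunnableConfig TypedDict.
    config = RunnableConfig(
        run_name="GAIA Agent",
        tags=["gaia", "langgraph", "agent"],
        metadata={"user_input": question},
        recursion_limit=30,  # stop the agent from looping on tool calls indefinitely
    )

    result = gaia_agent.invoke(input_state, config)
    final_response = result["messages"][-1].content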