Ahmud committed on
Commit
b72f130
·
verified ·
1 Parent(s): 3edd2ff

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +7 -51
agent.py CHANGED
@@ -25,6 +25,7 @@ llm = ChatOpenAI(
25
  model="moonshotai/kimi-k2:free", # Model must support function calling in OpenRouter
26
  temperature=1
27
  )
 
28
  python_tool = PythonAstREPLTool()
29
  search_tool = BraveSearch.from_api_key(
30
  api_key=os.getenv("BRAVE_SEARCH_API"),
@@ -103,70 +104,25 @@ gaia_agent = builder.compile() # converts my builder into a runnable agent by u
103
  class LangGraphAgent:
104
  def __init__(self):
105
  print("LangGraphAgent initialized.")
106
- self.question_count = 0 # Track the number of questions processed
107
 
108
  def __call__(self, question: str) -> str:
109
- # Determine which API key to use based on question count
110
- # First 50% of questions use OPENROUTER_API_KEY, rest use OPENROUTER_API_KEY_1
111
- api_key = os.getenv("OPENROUTER_API_KEY") if self.question_count % 2 == 0 else os.getenv("OPENROUTER_API_KEY_1")
112
-
113
- # Create a new LLM instance with the selected API key
114
- current_llm = ChatOpenAI(
115
- base_url="https://openrouter.ai/api/v1",
116
- api_key=api_key,
117
- model="moonshotai/kimi-k2:free",
118
- temperature=1
119
- )
120
-
121
- # Bind tools to the current LLM
122
- current_llm_with_tools = current_llm.bind_tools(tools)
123
-
124
- # Increment question counter for next call
125
- self.question_count += 1
126
-
127
- print(f"Running LangGraphAgent with input: {question[:150]}... (Using API key {self.question_count % 2 + 1})")
128
 
129
- # Create a custom LLM node for this specific question
130
- def custom_llm_call(state: MessagesState):
131
- return {
132
- "messages": [
133
- current_llm_with_tools.invoke(
134
- [SystemMessage(content=system_prompt)] + state["messages"]
135
- )
136
- ]
137
- }
138
-
139
- # Build a new workflow with the custom LLM
140
- custom_builder = StateGraph(MessagesState)
141
- custom_builder.add_node("llm_call", custom_llm_call)
142
- custom_builder.add_node("environment", tool_node)
143
- custom_builder.add_edge(START, "llm_call")
144
- custom_builder.add_conditional_edges(
145
- "llm_call",
146
- should_continue,
147
- {"Action": "environment", END: END}
148
- )
149
- custom_builder.add_edge("environment", "llm_call")
150
- custom_agent = custom_builder.compile()
151
-
152
- # Prepare the initial state and config
153
- input_state = {"messages": [HumanMessage(content=question)]}
154
  config = RunnableConfig(
155
  config={
156
  "run_name": "GAIA Agent",
157
  "tags": ["gaia", "langgraph", "agent"],
158
  "metadata": {"user_input": question},
159
- "recursion_limit": 30,
160
- "tracing": True
161
  }
162
  )
163
-
164
- # Run the agent
165
- result = custom_agent.invoke(input_state, config)
166
  final_response = result["messages"][-1].content
167
 
168
  try:
169
- return final_response.split("FINAL ANSWER:")[-1].strip()
170
  except Exception:
171
  print("Could not split on 'FINAL ANSWER:'")
172
  return final_response
 
25
  model="moonshotai/kimi-k2:free", # Model must support function calling in OpenRouter
26
  temperature=1
27
  )
28
+
29
  python_tool = PythonAstREPLTool()
30
  search_tool = BraveSearch.from_api_key(
31
  api_key=os.getenv("BRAVE_SEARCH_API"),
 
104
  class LangGraphAgent:
105
  def __init__(self):
106
  print("LangGraphAgent initialized.")
 
107
 
108
  def __call__(self, question: str) -> str:
109
+ input_state = {"messages": [HumanMessage(content=question)]} # prepare the initial user message
110
+ print(f"Running LangGraphAgent with input: {question[:150]}...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
+ # tracing configuration for LangSmith
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  config = RunnableConfig(
114
  config={
115
  "run_name": "GAIA Agent",
116
  "tags": ["gaia", "langgraph", "agent"],
117
  "metadata": {"user_input": question},
118
+ "recursion_limit": 30
 
119
  }
120
  )
121
+ result = gaia_agent.invoke(input_state, config) # prevents infinite looping when the LLM keeps calling tools over and over
 
 
122
  final_response = result["messages"][-1].content
123
 
124
  try:
125
+ return final_response.split("FINAL ANSWER:")[-1].strip() # parse out only what's after "FINAL ANSWER:"
126
  except Exception:
127
  print("Could not split on 'FINAL ANSWER:'")
128
  return final_response