wt002 committed on
Commit
3f9a023
·
verified ·
1 Parent(s): 0a73fdc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -15
app.py CHANGED
@@ -143,23 +143,28 @@ def reasoning_node(state: AgentState) -> AgentState:
143
  from langchain.schema import HumanMessage, AIMessage
144
  from langchain.prompts import ChatPromptTemplate
145
 
146
- # Explicitly load the Google API key
147
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
148
  if not GOOGLE_API_KEY:
149
  raise ValueError("GOOGLE_API_KEY not set in environment variables.")
150
 
151
- # Defensive: Ensure valid message history
152
- if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
 
153
  state["history"].append(HumanMessage(content="Continue."))
154
 
155
- # Initialize Gemini model
 
 
 
 
156
  llm = ChatGoogleGenerativeAI(
157
  model="gemini-1.5-flash",
158
  temperature=0.1,
159
  google_api_key=GOOGLE_API_KEY
160
  )
161
 
162
- # Build prompt
163
  prompt = ChatPromptTemplate.from_messages([
164
  ("system", (
165
  "You're an expert problem solver. Analyze the question, select the best tool, "
@@ -170,36 +175,37 @@ def reasoning_node(state: AgentState) -> AgentState:
170
  "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
171
  "Action Input: [Input for tool OR final response]"
172
  )),
173
- *state['history']
174
  ])
175
 
 
176
  chain = prompt | llm
177
-
178
- # Invoke chain with inputs
179
  response = chain.invoke({
180
  "context": state["context"],
181
- "reasoning": state["reasoning"],
182
  "question": state["question"]
183
  })
184
 
185
  content = response.content
186
  reasoning, action, action_input = parse_agent_response(content)
187
 
188
- # Update agent state
189
- state['history'].append(AIMessage(content=content))
190
- state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
191
- state['iterations'] += 1
192
 
 
193
  if "final answer" in action.lower():
194
- state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
195
  else:
196
- state['context']['current_tool'] = {
197
  "tool": action,
198
  "input": action_input
199
  }
200
 
201
  return state
202
 
 
203
 
204
 
205
  def tool_node(state: AgentState) -> AgentState:
 
143
  from langchain.schema import HumanMessage, AIMessage
144
  from langchain.prompts import ChatPromptTemplate
145
 
146
+ # Load and verify the Google API key
147
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
148
  if not GOOGLE_API_KEY:
149
  raise ValueError("GOOGLE_API_KEY not set in environment variables.")
150
 
151
+ # Ensure history ends with a HumanMessage
152
+ if not state.get("history") or not isinstance(state["history"][-1], HumanMessage):
153
+ state["history"] = state.get("history", [])
154
  state["history"].append(HumanMessage(content="Continue."))
155
 
156
+ # Ensure context is a dictionary
157
+ if not isinstance(state.get("context"), dict):
158
+ state["context"] = {}
159
+
160
+ # Initialize the Gemini model (via LangChain wrapper)
161
  llm = ChatGoogleGenerativeAI(
162
  model="gemini-1.5-flash",
163
  temperature=0.1,
164
  google_api_key=GOOGLE_API_KEY
165
  )
166
 
167
+ # Create prompt from messages
168
  prompt = ChatPromptTemplate.from_messages([
169
  ("system", (
170
  "You're an expert problem solver. Analyze the question, select the best tool, "
 
175
  "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
176
  "Action Input: [Input for tool OR final response]"
177
  )),
178
+ *state["history"]
179
  ])
180
 
181
+ # Build and invoke the chain
182
  chain = prompt | llm
 
 
183
  response = chain.invoke({
184
  "context": state["context"],
185
+ "reasoning": state.get("reasoning", ""),
186
  "question": state["question"]
187
  })
188
 
189
  content = response.content
190
  reasoning, action, action_input = parse_agent_response(content)
191
 
192
+ # Update state
193
+ state["history"].append(AIMessage(content=content))
194
+ state["reasoning"] += f"\nStep {state['iterations']+1}: {reasoning}"
195
+ state["iterations"] += 1
196
 
197
+ # Decide next step based on action
198
  if "final answer" in action.lower():
199
+ state["history"].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
200
  else:
201
+ state["context"]["current_tool"] = {
202
  "tool": action,
203
  "input": action_input
204
  }
205
 
206
  return state
207
 
208
+
209
 
210
 
211
  def tool_node(state: AgentState) -> AgentState: