naman1102 commited on
Commit
319e085
·
1 Parent(s): 4cc8521

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -25
app.py CHANGED
@@ -160,23 +160,78 @@ def inspect_node(state: AgentState) -> AgentState:
160
  After running a tool, show GPT:
161
  - ORIGINAL user question
162
  - Any tool results (web_search_result, ocr_result, excel_result, transcript, wiki_result)
163
- - The INTERIM_ANSWER (what plan_node initially provided under 'final_answer')
164
- Then ask GPT to either:
 
 
165
  • Return {"final_answer":"<final>"} if done, OR
166
  • Return exactly one tool key to run next (wiki_query / ocr_path / excel_path & excel_sheet_name / audio_path).
167
  """
168
  global tool_counter
169
 
170
- # 0) If we've already called tools too many times, force a final answer:
171
  if tool_counter >= 5:
172
- return {
173
- "messages": state["messages"],
174
- "final_answer": state.get("final_answer", "ERROR: no interim_answer to finalize."),
175
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  messages_for_llm = []
178
 
179
- # 1) Re‐insert original user question
180
  question = ""
181
  for msg in reversed(state.get("messages", [])):
182
  if isinstance(msg, HumanMessage):
@@ -184,7 +239,7 @@ def inspect_node(state: AgentState) -> AgentState:
184
  break
185
  messages_for_llm.append(SystemMessage(content=f"USER_QUESTION: {question}"))
186
 
187
- # 2) Add any tool results
188
  if sr := state.get("web_search_result"):
189
  messages_for_llm.append(SystemMessage(content=f"WEB_SEARCH_RESULT: {sr}"))
190
  if orc := state.get("ocr_result"):
@@ -196,14 +251,14 @@ def inspect_node(state: AgentState) -> AgentState:
196
  if wr := state.get("wiki_result"):
197
  messages_for_llm.append(SystemMessage(content=f"WIKIPEDIA_RESULT: {wr}"))
198
 
199
- # 3) Add the interim answer under INTERIM_ANSWER
200
- if ia := state.get("final_answer"):
201
- messages_for_llm.append(SystemMessage(content=f"INTERIM_ANSWER: {ia}"))
202
 
203
- # 4) Prompt GPT to decide final or another tool
204
  prompt = (
205
  "You have a current draft answer (INTERIM_ANSWER) and possibly some tool results above.\n"
206
- "If you are confident it's correct, return exactly:\n"
207
  " {\"final_answer\":\"<your final answer>\"}\n"
208
  "and nothing else.\n"
209
  "Otherwise, return exactly one of these JSON literals to fetch another tool:\n"
@@ -216,35 +271,30 @@ def inspect_node(state: AgentState) -> AgentState:
216
  messages_for_llm.append(SystemMessage(content=prompt))
217
  llm_response = llm(messages_for_llm)
218
  raw = llm_response.content.strip()
219
-
220
  new_msgs = state["messages"] + [AIMessage(content=raw)]
221
 
222
- # Try to parse the LLM's JSON
223
  try:
224
  parsed = json.loads(raw)
225
  if isinstance(parsed, dict):
226
- # If GPT gave a final_answer, we finish here
227
  if "final_answer" in parsed:
228
  return {"messages": new_msgs, "final_answer": parsed["final_answer"]}
229
 
230
- # If GPT requested exactly one valid tool, return only that key
231
  valid_keys = {"wiki_query", "ocr_path", "excel_path", "excel_sheet_name", "audio_path"}
232
  requested_keys = set(parsed.keys()) & valid_keys
233
  if len(requested_keys) == 1:
234
  clean: AgentState = {"messages": new_msgs}
235
- # Carry forward the global tool_counter implicitly (no need to store in state)
236
  for k in requested_keys:
237
  clean[k] = parsed[k]
238
  return clean
239
  except json.JSONDecodeError:
240
  pass
241
 
242
- # Fallback: if GPT didn't give a valid tool key or final_answer, finalize with existing interim
243
- if ia := state.get("final_answer"):
244
- return {"messages": new_msgs, "final_answer": ia}
245
-
246
- # If there is no interim either, we cannot proceed
247
- return {"messages": new_msgs, "final_answer": "ERROR: could not parse inspect decision."}
248
 
249
 
250
  # ─── 6) finalize_node ───
 
160
  After running a tool, show GPT:
161
  - ORIGINAL user question
162
  - Any tool results (web_search_result, ocr_result, excel_result, transcript, wiki_result)
163
+ - The INTERIM_ANSWER (always present if plan_node ran correctly)
164
+
165
+ If tool_counter ≥ 5, use LLM once more (with full context) to craft a final answer.
166
+ Otherwise, ask GPT to either:
167
  • Return {"final_answer":"<final>"} if done, OR
168
  • Return exactly one tool key to run next (wiki_query / ocr_path / excel_path & excel_sheet_name / audio_path).
169
  """
170
  global tool_counter
171
 
172
+ # 0) MAX‐TOOLS GUARD: if we've already run 5 tools, ask LLM to finalize
173
  if tool_counter >= 5:
174
+ # Build a fresh prompt that includes:
175
+ # - The user’s original question
176
+ # - Any tool results collected so far
177
+ # - The interim answer
178
+ # Then ask for a polished final_answer.
179
+ messages_for_llm = []
180
+
181
+ # (1) Put back the user question
182
+ question = ""
183
+ for msg in reversed(state.get("messages", [])):
184
+ if isinstance(msg, HumanMessage):
185
+ question = msg.content
186
+ break
187
+ messages_for_llm.append(SystemMessage(content=f"USER_QUESTION: {question}"))
188
+
189
+ # (2) Add any tool results
190
+ if sr := state.get("web_search_result"):
191
+ messages_for_llm.append(SystemMessage(content=f"WEB_SEARCH_RESULT: {sr}"))
192
+ if orc := state.get("ocr_result"):
193
+ messages_for_llm.append(SystemMessage(content=f"OCR_RESULT: {orc}"))
194
+ if exr := state.get("excel_result"):
195
+ messages_for_llm.append(SystemMessage(content=f"EXCEL_RESULT: {exr}"))
196
+ if tr := state.get("transcript"):
197
+ messages_for_llm.append(SystemMessage(content=f"AUDIO_TRANSCRIPT: {tr}"))
198
+ if wr := state.get("wiki_result"):
199
+ messages_for_llm.append(SystemMessage(content=f"WIKIPEDIA_RESULT: {wr}"))
200
+
201
+ # (3) Always show the interim answer
202
+ interim = state.get("interim_answer", "")
203
+ messages_for_llm.append(SystemMessage(content=f"INTERIM_ANSWER: {interim}"))
204
+
205
+ # (4) Prompt GPT to produce a polished final answer
206
+ prompt = (
207
+ "You have the user’s question, any relevant tool results above,"
208
+ " and a draft answer (INTERIM_ANSWER).\n"
209
+ "Using all of that, write a concise, polished final answer. "
210
+ "Return exactly:\n"
211
+ " {\"final_answer\":\"<your final answer>\"}\n"
212
+ "and nothing else.\n"
213
+ )
214
+ messages_for_llm.append(SystemMessage(content=prompt))
215
+
216
+ llm_response = llm(messages_for_llm)
217
+ raw = llm_response.content.strip()
218
+ new_msgs = state["messages"] + [AIMessage(content=raw)]
219
 
220
+ # Parse the LLM’s JSON
221
+ try:
222
+ parsed = json.loads(raw)
223
+ if isinstance(parsed, dict) and "final_answer" in parsed:
224
+ return {"messages": new_msgs, "final_answer": parsed["final_answer"]}
225
+ except json.JSONDecodeError:
226
+ pass
227
+
228
+ # If parsing fails, at least return the interim as fallback
229
+ return {"messages": new_msgs, "final_answer": interim}
230
+
231
+ # ——————————— If tool_counter < 5, proceed as before ———————————
232
  messages_for_llm = []
233
 
234
+ # (1) Re‐insert original user question
235
  question = ""
236
  for msg in reversed(state.get("messages", [])):
237
  if isinstance(msg, HumanMessage):
 
239
  break
240
  messages_for_llm.append(SystemMessage(content=f"USER_QUESTION: {question}"))
241
 
242
+ # (2) Add any tool results
243
  if sr := state.get("web_search_result"):
244
  messages_for_llm.append(SystemMessage(content=f"WEB_SEARCH_RESULT: {sr}"))
245
  if orc := state.get("ocr_result"):
 
251
  if wr := state.get("wiki_result"):
252
  messages_for_llm.append(SystemMessage(content=f"WIKIPEDIA_RESULT: {wr}"))
253
 
254
+ # (3) Always show the interim answer
255
+ interim = state.get("interim_answer", "")
256
+ messages_for_llm.append(SystemMessage(content=f"INTERIM_ANSWER: {interim}"))
257
 
258
+ # (4) Prompt GPT to decide final or another tool
259
  prompt = (
260
  "You have a current draft answer (INTERIM_ANSWER) and possibly some tool results above.\n"
261
+ "If you are confident it’s correct, return exactly:\n"
262
  " {\"final_answer\":\"<your final answer>\"}\n"
263
  "and nothing else.\n"
264
  "Otherwise, return exactly one of these JSON literals to fetch another tool:\n"
 
271
  messages_for_llm.append(SystemMessage(content=prompt))
272
  llm_response = llm(messages_for_llm)
273
  raw = llm_response.content.strip()
 
274
  new_msgs = state["messages"] + [AIMessage(content=raw)]
275
 
276
+ # Try to parse the LLM’s JSON
277
  try:
278
  parsed = json.loads(raw)
279
  if isinstance(parsed, dict):
280
+ # (a) If GPT gave a final_answer, return immediately
281
  if "final_answer" in parsed:
282
  return {"messages": new_msgs, "final_answer": parsed["final_answer"]}
283
 
284
+ # (b) If GPT requested exactly one valid tool, return only that key
285
  valid_keys = {"wiki_query", "ocr_path", "excel_path", "excel_sheet_name", "audio_path"}
286
  requested_keys = set(parsed.keys()) & valid_keys
287
  if len(requested_keys) == 1:
288
  clean: AgentState = {"messages": new_msgs}
 
289
  for k in requested_keys:
290
  clean[k] = parsed[k]
291
  return clean
292
  except json.JSONDecodeError:
293
  pass
294
 
295
+ # (c) Fallback: if GPT never returned a valid tool key or a final_answer,
296
+ # just finalize with the existing interim_answer
297
+ return {"messages": new_msgs, "final_answer": interim}
 
 
 
298
 
299
 
300
  # ─── 6) finalize_node ───