naman1102 committed
Commit c30265f · 1 Parent(s): dafb093

Update app.py

Files changed (1)
  1. app.py +16 -24
app.py CHANGED
@@ -168,18 +168,14 @@ def inspect_node(state: AgentState) -> AgentState:
     • Return {"final_answer":"<final>"} if done, OR
     • Return exactly one tool key to run next (wiki_query / ocr_path / excel_path & excel_sheet_name / audio_path).
     """
+
     global tool_counter
 
-    # 0) MAX‐TOOLS GUARD: if we've already run 5 tools, ask LLM to finalize
+    # If we've already run 5 tools, ask GPT for a strictly‐formatted JSON final_answer
     if tool_counter >= 5:
-        # Build a fresh prompt that includes:
-        #   - The user’s original question
-        #   - Any tool results collected so far
-        #   - The interim answer
-        #   Then ask for a polished final_answer.
         messages_for_llm = []
 
-        # (1) Put back the user question
+        # Re‐insert the user’s question
         question = ""
         for msg in reversed(state.get("messages", [])):
             if isinstance(msg, HumanMessage):
@@ -187,7 +183,7 @@ def inspect_node(state: AgentState) -> AgentState:
                 break
         messages_for_llm.append(SystemMessage(content=f"USER_QUESTION: {question}"))
 
-        # (2) Add any tool results
+        # Add any tool results so far
         if sr := state.get("web_search_result"):
             messages_for_llm.append(SystemMessage(content=f"WEB_SEARCH_RESULT: {sr}"))
         if orc := state.get("ocr_result"):
@@ -199,29 +195,26 @@ def inspect_node(state: AgentState) -> AgentState:
         if wr := state.get("wiki_result"):
             messages_for_llm.append(SystemMessage(content=f"WIKIPEDIA_RESULT: {wr}"))
 
-        # (3) Always show the interim answer
+        # Show the interim answer
         interim = state.get("interim_answer", "")
         messages_for_llm.append(SystemMessage(content=f"INTERIM_ANSWER: {interim}"))
-        messages_for_llm.append(SystemMessage(content="Please write a polished final answer, with no explanation at all."))
-        # (4) Prompt GPT to produce a polished final answer
-
-        prompt = (
+
+        # Now ask for JSON ONLY (no reasoning, no extra text)
+        final_prompt = (
             "You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."
-            "You have the user’s question, any relevant tool results above,"
-            " and a draft answer (INTERIM_ANSWER).\n"
-            "Using all of that, write a concise, polished final answer. "
-            "Do not include any explanation or commentary, just the answer."
-            "Return exactly:\n"
-            " {\"final_answer\":\"<your final answer>\"}\n"
-            "and nothing else.\n"
+            "Using only the information above—including the USER_QUESTION, "
+            "any TOOL_RESULT, and the INTERIM_ANSWER—produce a concise final answer. "
+            "Return exactly one JSON object and nothing else, in this format:\n\n"
+            "{\"final_answer\":\"<your final answer>\"}\n"
+            "Do not include any other words or punctuation outside that JSON."
         )
-        messages_for_llm.append(SystemMessage(content=prompt))
+        messages_for_llm.append(SystemMessage(content=final_prompt))
 
         llm_response = llm(messages_for_llm)
         raw = llm_response.content.strip()
         new_msgs = state["messages"] + [AIMessage(content=raw)]
 
-        # Parse the LLM’s JSON
+        # Try to parse exactly one JSON with "final_answer"
         try:
             parsed = json.loads(raw)
             if isinstance(parsed, dict) and "final_answer" in parsed:
@@ -229,9 +222,8 @@ def inspect_node(state: AgentState) -> AgentState:
         except json.JSONDecodeError:
             pass
 
-        # If parsing fails, at least return the interim as fallback
+        # Fallback to returning the interim in case JSON parse fails
         return {"messages": new_msgs, "final_answer": interim}
-
     # ——————————— If tool_counter < 5, proceed as before ———————————
     messages_for_llm = []
 
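
The new guard still assumes that `json.loads(raw)` succeeds on the whole reply, even though the same prompt also describes a `FINAL ANSWER: [...]` template; a reply that mixes the two formats falls straight through to the interim answer. Below is a minimal sketch of a more forgiving parser. It is not part of this commit; the helper name `extract_final_answer` and the regex-based salvage steps are illustrative assumptions.

import json
import re

def extract_final_answer(raw: str, interim: str) -> str:
    """Best-effort extraction of a final answer from an LLM reply.

    Tries, in order: a clean JSON object, the first {...} block embedded
    in surrounding text, and a 'FINAL ANSWER:' template line, before
    falling back to the interim answer.
    """
    # 1) The reply is exactly the requested JSON object.
    try:
        parsed = json.loads(raw)
        if isinstance(parsed, dict) and "final_answer" in parsed:
            return str(parsed["final_answer"])
    except json.JSONDecodeError:
        pass

    # 2) The JSON object is embedded in extra prose.
    match = re.search(r"\{[^{}]*\"final_answer\"[^{}]*\}", raw)
    if match:
        try:
            parsed = json.loads(match.group(0))
            if isinstance(parsed, dict) and "final_answer" in parsed:
                return str(parsed["final_answer"])
        except json.JSONDecodeError:
            pass

    # 3) The reply followed the 'FINAL ANSWER: ...' template instead.
    match = re.search(r"FINAL ANSWER:\s*(.+)", raw)
    if match:
        return match.group(1).strip()

    # 4) Nothing usable: keep the draft answer.
    return interim

Inside the guard, the try/except block would then reduce to `final = extract_final_answer(raw, interim)` followed by `return {"messages": new_msgs, "final_answer": final}`.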