naman1102 committed on
Commit 04bd45b · 1 Parent(s): 0fed708

Update app.py

Files changed (1)
app.py +7 -5
app.py CHANGED
@@ -12,7 +12,7 @@ from langgraph.prebuilt import ToolNode
 from langchain_openai import ChatOpenAI
 from langgraph.graph import StateGraph, START, END
 from langgraph.graph.message import add_messages
-from langchain.schema import HumanMessage, SystemMessage
+from langchain.schema import HumanMessage, SystemMessage, AIMessage
 # Create a ToolNode that knows about your web_search function
 import json
 from state import AgentState
@@ -61,11 +61,13 @@ def plan_node(state: AgentState) -> AgentState:
     llm_response = llm([system_msg, human_msg])
     llm_out = llm_response.content.strip()
 
-    # 4) Try to parse as a Python dict
+    # 4) Always append the LLM output as an AIMessage
+    ai_msg = AIMessage(content=llm_out)
+    new_msgs = prior_msgs.copy() + [ai_msg]
     try:
         parsed = eval(llm_out, {}, {})
         if isinstance(parsed, dict):
-            partial: AgentState = {"messages": prior_msgs.copy()}
+            partial: AgentState = {"messages": new_msgs}
             allowed = {
                 "web_search_query",
                 "ocr_path",
@@ -82,7 +84,7 @@ def plan_node(state: AgentState) -> AgentState:
 
     # 5) Fallback
     return {
-        "messages": prior_msgs.copy(),
+        "messages": new_msgs,
         "final_answer": "Sorry, I could not parse your intent."
     }
 
@@ -112,7 +114,7 @@ def finalize_node(state: AgentState) -> AgentState:
     if state.get("final_answer") is not None:
         return {"final_answer": state["final_answer"]}
 
-    # 3) Otherwise, append our please give final answer SystemMessage
+    # 3) Otherwise, append our "please give final answer" SystemMessage
    messages_for_llm.append(
        SystemMessage(content="Please provide the final answer now.")
    )
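
The net effect of this commit: plan_node now records the model's raw reply as an AIMessage before trying to parse it, so both the parsed-dict branch and the fallback branch return the same new_msgs history. Below is a minimal sketch of that pattern, not the actual app.py: the prompt text, the model name, the full "allowed" key set, and the AgentState fields beyond those visible in the hunks are assumptions, and ast.literal_eval stands in for the commit's eval(llm_out, {}, {}) call purely as a safer illustration.

import ast
from typing import Optional, TypedDict

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # assumed: app.py builds its ChatOpenAI above plan_node


class AgentState(TypedDict, total=False):  # stand-in for `from state import AgentState`
    messages: list
    web_search_query: Optional[str]
    ocr_path: Optional[str]
    final_answer: Optional[str]


def plan_node(state: AgentState) -> AgentState:
    prior_msgs = state.get("messages", [])  # assumed source of prior_msgs
    system_msg = SystemMessage(content="Reply with a Python dict of planner fields.")  # assumed prompt
    human_msg = HumanMessage(content=prior_msgs[-1].content if prior_msgs else "")  # assumed

    llm_response = llm([system_msg, human_msg])
    llm_out = llm_response.content.strip()

    # 4) Always append the LLM output as an AIMessage (the behaviour this commit adds)
    ai_msg = AIMessage(content=llm_out)
    new_msgs = prior_msgs.copy() + [ai_msg]

    try:
        # The commit itself parses with eval(llm_out, {}, {}); literal_eval is
        # only a safer stand-in for this sketch.
        parsed = ast.literal_eval(llm_out)
        if isinstance(parsed, dict):
            partial: AgentState = {"messages": new_msgs}
            allowed = {"web_search_query", "ocr_path"}  # the hunk cuts off here; the real set has more keys
            for key in allowed & parsed.keys():
                partial[key] = parsed[key]
            return partial
    except (ValueError, SyntaxError):
        pass

    # 5) Fallback: the AIMessage still ends up in the returned state
    return {
        "messages": new_msgs,
        "final_answer": "Sorry, I could not parse your intent.",
    }

Because new_msgs is built before the try block, the model's reply is preserved in the history even when parsing fails, which is why the fallback return changes from prior_msgs.copy() to new_msgs.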