naman1102 committed
Commit 772d3fb · 1 Parent(s): eea769a

Update app.py

Browse files
Files changed (1)
  1. app.py +21 -22
app.py CHANGED
@@ -5,19 +5,14 @@ import inspect
 import pandas as pd
 from langgraph.prebuilt import ToolNode, create_react_agent
 from tools import web_search, parse_excel, ocr_image
-# import langgraph
-from typing import TypedDict, Annotated
+
+from typing import Any, Dict
+# from typing import TypedDict, Annotated
 
 from langchain_openai import ChatOpenAI
 from langgraph.graph import StateGraph, START, END
 from langgraph.graph.message import add_messages
-import langgraph
-import importlib.metadata
-try:
-    lg_ver = importlib.metadata.version("langgraph")
-    print("▶︎ LangGraph version:", lg_ver)
-except importlib.metadata.PackageNotFoundError:
-    print("LangGraph is not installed.")
+from langchain.schema import HumanMessage, AIMessage
 # Create a ToolNode that knows about your web_search function
 
 # (Keep Constants as is)
@@ -50,27 +45,31 @@ compiled_graph = graph.compile()
 # ─── 5) Define `respond_to_input` to call `compiled_graph.invoke` ───
 def respond_to_input(user_input: str) -> str:
     """
-    We must supply `{"messages": [ { "role": "user", "content": ... } ]}`
-    because create_react_agent expects `state["messages"]` to be a list of
-    {role,content} dicts, not plain strings.
+    In v0.4.7, create_react_agent expects:
+        state["messages"] == list[BaseMessage], typically starting with a HumanMessage.
+
+    We feed it exactly that, then call compiled_graph.invoke().
+    Finally, we scan final_state["messages"] for the last AIMessage and return its .content.
     """
+    # 5.a) Build the initial state with a single HumanMessage
     initial_state: Dict[str, Any] = {
-        "messages": [
-            {"role": "user", "content": user_input}
-        ]
+        "messages": [ HumanMessage(content=user_input) ]
     }
-    # In langgraph 0.4.7, the compiled graph uses .invoke(...)
+
+    # 5.b) In LangGraph v0.4.7, use .invoke(...)
     final_state = compiled_graph.invoke(initial_state)
-    # The default agent appends its own assistant messages to state["messages"].
-    # Extract the **last** message where role == "assistant".
+
+    # 5.c) Extract the last AIMessage from final_state["messages"]
+    #      (create_react_agent always appends its assistant replies as AIMessage)
     assistant_messages = [
-        msg["content"]
+        msg.content
         for msg in final_state["messages"]
-        if msg.get("role") == "assistant"
+        if isinstance(msg, AIMessage)
    ]
     if not assistant_messages:
-        return "❗️Agent did not return any assistant message."
-    # Return the very last assistant “content”
+        return "❗️Agent did not return any AIMessage."
+
+    # Return the final AIMessage's content
     return assistant_messages[-1]
 
 class BasicAgent:
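
For reference, the message-handling pattern this commit adopts can be exercised on its own. The sketch below is a minimal illustration of the same round trip: build the state as a list containing one HumanMessage, invoke a prebuilt ReAct graph, and return the content of the last AIMessage. The model name, the dummy echo tool, and the helper name last_ai_reply are placeholders for this illustration only (the real app wires in web_search, parse_excel, and ocr_image), and an OPENAI_API_KEY is assumed to be set.

# Minimal sketch of the message round trip this commit switches to.
# Placeholders (not from the commit): the model name, the `echo` tool,
# and the helper name `last_ai_reply`.
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def echo(text: str) -> str:
    """Return the input unchanged (stand-in for web_search / parse_excel / ocr_image)."""
    return text


# create_react_agent returns an already-compiled graph, so .invoke() works directly.
agent = create_react_agent(ChatOpenAI(model="gpt-4o-mini"), tools=[echo])


def last_ai_reply(user_input: str) -> str:
    # State is a list of BaseMessage objects starting with a HumanMessage,
    # exactly as the updated respond_to_input builds it.
    final_state = agent.invoke({"messages": [HumanMessage(content=user_input)]})
    replies = [m.content for m in final_state["messages"] if isinstance(m, AIMessage)]
    return replies[-1] if replies else "No AIMessage returned."


if __name__ == "__main__":
    print(last_ai_reply("What does this agent do?"))

Filtering with isinstance(msg, AIMessage) instead of msg.get("role") == "assistant" matches how LangGraph's message state actually stores replies, as BaseMessage objects rather than plain dicts, which is the point of the change in this diff.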