naman1102 commited on
Commit
eea769a
·
1 Parent(s): a03e926

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -41
app.py CHANGED
@@ -24,60 +24,54 @@ except importlib.metadata.PackageNotFoundError:
24
  # --- Constants ---
25
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
26
 
27
- # --- Basic Agent Definition ---
28
- # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
29
- class AgentState(TypedDict):
30
- # Keep track of the full “chat history” so the LLM sees it each time
31
- messages: list[str]
32
- # These two fields are _optional_—we won’t even use them directly in our code,
33
- # but the ReAct agent will populate them when it calls a tool.
34
- tool_name: str # e.g. "ocr_image" or "web_search"
35
- tool_input: str # whatever arguments you passed into the tool
36
-
37
- # ─── 2) Instantiate your LLM (ChatOpenAI) ───
38
- llm = ChatOpenAI(model_name="gpt-4.1-mini", temperature=0.0)
39
 
40
- # ─── 3) Wrap your three tools into a single ToolNode ───
41
- # ToolNode lets LangGraph know “these are the only tool functions the agent may call”
42
  tool_node = ToolNode([ocr_image, parse_excel, web_search])
43
 
44
- # ─── 4) Use create_react_agent to build a ReAct‐style agent for you ───
45
- # This single “agent” node will:
46
- # • Take the entire AgentState (including messages),
47
- # Look at state["messages"], decide if it needs to call a tool,
48
- # • If so, emit {"tool": "<tool_name>", "input": "<tool_input>"},
49
- # and then feed results back into the LLM automatically,
50
- # If not, emit a final answer as plain text.
51
- agent = create_react_agent(llm, tool_node)
52
-
53
- # ─── 5) Build a graph with exactly two edges, just like the tutorial ───
54
- graph = StateGraph(AgentState)
55
  graph.add_node("agent", agent)
56
 
57
- # 5.a) Whenever user input arrives, send it into the “agent” node:
58
  graph.add_edge(START, "agent")
59
 
60
- # 5.b) Once agent produces its final text (not a tool call), go to END:
61
  graph.add_edge("agent", END)
62
 
63
- # 5.c) Compile so we can call .invoke(...) at runtime
64
  compiled_graph = graph.compile()
65
 
66
- # ─── 6) Define a simple function Gradio (and the “submit all” loop) can call ───
67
  def respond_to_input(user_input: str) -> str:
68
- initial_state: AgentState = {
69
- "messages": [], # No history on the first turn
70
- "tool_name": "",
71
- "tool_input": ""
 
 
 
 
 
72
  }
73
- # In v0.4.7, use .invoke(...) on the compiled graph
74
- final_state = compiled_graph.invoke(initial_state, user_input)
75
- # `create_react_agent` always appends its LLM text into state["messages"].
76
- # The last entry of “ASSISTANT: <answer>” is the final answer.
77
- last_line = final_state["messages"][-1]
78
- # If the agent prefixes with “ASSISTANT: ”, strip it off.
79
- return last_line.replace("ASSISTANT: ", "", 1)
80
-
 
 
 
 
 
81
 
82
  class BasicAgent:
83
  def __init__(self):
 
24
  # --- Constants ---
25
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
26
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
+ llm = ChatOpenAI(model_name="gpt-4.1-mini", temperature=0.0)
 
29
  tool_node = ToolNode([ocr_image, parse_excel, web_search])
30
 
31
+ agent = create_react_agent(
32
+ model=llm,
33
+ tools=tool_node
34
+ # (Use default prompt/state_schema; do NOT pass your own TypedDict.)
35
+ )
36
+
37
+ # ─── 4) Build a graph that simply wires START → "agent" → END ───
38
+ graph = StateGraph(dict) # We’ll use plain dicts instead of a custom TypedDict
 
 
 
39
  graph.add_node("agent", agent)
40
 
41
+ # 4.a) Whenever user input arrives, send it straight into the agent
42
  graph.add_edge(START, "agent")
43
 
44
+ # 4.b) Once the agent returns a final answer, go to END
45
  graph.add_edge("agent", END)
46
 
47
+ # 4.c) Compile so we can call `.invoke()` at runtime
48
  compiled_graph = graph.compile()
49
 
50
+ # ─── 5) Define `respond_to_input` to call `compiled_graph.invoke` ───
51
  def respond_to_input(user_input: str) -> str:
52
+ """
53
+ We must supply `{"messages": [ { "role": "user", "content": ... } ]}`
54
+ because create_react_agent expects `state["messages"]` to be a list of
55
+ {role,content} dicts, not plain strings.
56
+ """
57
+ initial_state: Dict[str, Any] = {
58
+ "messages": [
59
+ {"role": "user", "content": user_input}
60
+ ]
61
  }
62
+ # In langgraph 0.4.7, the compiled graph uses .invoke(...)
63
+ final_state = compiled_graph.invoke(initial_state)
64
+ # The default agent appends its own assistant messages to state["messages"].
65
+ # Extract the **last** message where role == "assistant".
66
+ assistant_messages = [
67
+ msg["content"]
68
+ for msg in final_state["messages"]
69
+ if msg.get("role") == "assistant"
70
+ ]
71
+ if not assistant_messages:
72
+ return "❗️Agent did not return any assistant message."
73
+ # Return the very last assistant “content”
74
+ return assistant_messages[-1]
75
 
76
  class BasicAgent:
77
  def __init__(self):