Stefan888 committed
Commit b7d70c9 · 1 Parent(s): 3086eea

Some fixes to message history handling

Files changed (1): app.py (+12 −3)
app.py CHANGED
@@ -80,11 +80,17 @@ def assistant(state: AgentState):
     {textual_description_of_tool}
     """
     )
-    user_prompt = HumanMessage(content=f"Question: {question.get('question', question)}")
-    messages = [system_prompt, user_prompt] + state.get("messages", [])
+    # Always include conversation history
+    messages = [system_prompt] + state.get("messages", [])
+    # Add the user question only if not already present
+    if not any(isinstance(m, HumanMessage) and m.content.startswith("Question:") for m in messages):
+        user_prompt = HumanMessage(content=f"Question: {question.get('question', question)}")
+        messages.append(user_prompt)
     # If tool_outputs exist, add them as context
     if state.get("tool_outputs"):
-        messages.append(HumanMessage(content=f"Tool results: {state['tool_outputs']}"))
+        tool_msg = HumanMessage(content=f"Tool results: {state['tool_outputs']}")
+        messages.append(tool_msg)
+        state.setdefault("messages", []).append(tool_msg)
     print(f"Messages sent to LLM: {messages}")
     response = llm_with_tools.invoke(messages)
     print(f"Raw LLM response: {response}")
@@ -94,6 +100,7 @@ def assistant(state: AgentState):
         print(f"Tool calls requested: {tool_calls}")
         state["tool_calls"] = tool_calls
         state["answer"] = "" # Not final yet
+        state.setdefault("messages", []).append(AIMessage(content="Calling tool: " + str(tool_calls)))
     else:
         state["answer"] = response.content.strip()
         print(f"Model response: {state['answer']}")
@@ -121,6 +128,8 @@ def tool_node(state: AgentState):
         outputs.append(result)
     state["tool_outputs"] = outputs
     state["tool_calls"] = None # Clear tool calls
+    # Append tool output to conversation history
+    state.setdefault("messages", []).append(HumanMessage(content=f"Tool results: {outputs}"))
     return state
 
 #building the graph
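
For reference, a minimal self-contained sketch of the history pattern these hunks introduce: build the prompt from the accumulated history, add the "Question:" message only once, and persist tool results into state["messages"] so later turns see them. It assumes only the langchain_core message classes; State, build_prompt, and the plain question string here are simplified stand-ins for app.py's actual AgentState, system prompt, and question payload, not the file's real definitions.

from typing import Any, List, TypedDict

from langchain_core.messages import BaseMessage, HumanMessage


class State(TypedDict, total=False):
    # Simplified stand-in for app.py's AgentState.
    messages: List[BaseMessage]
    tool_outputs: List[Any]


def build_prompt(state: State, question: str) -> List[BaseMessage]:
    # Start from the accumulated conversation history (empty on the first turn).
    messages: List[BaseMessage] = list(state.get("messages", []))

    # Append the "Question:" message only once per conversation,
    # mirroring the startswith guard added in the first hunk.
    if not any(
        isinstance(m, HumanMessage) and str(m.content).startswith("Question:")
        for m in messages
    ):
        messages.append(HumanMessage(content=f"Question: {question}"))

    # Surface tool results to this LLM call *and* persist them in the
    # history so subsequent turns can see them, as the diff does.
    if state.get("tool_outputs"):
        tool_msg = HumanMessage(content=f"Tool results: {state['tool_outputs']}")
        messages.append(tool_msg)
        state.setdefault("messages", []).append(tool_msg)

    return messages


if __name__ == "__main__":
    state: State = {"tool_outputs": ["6 * 7 = 42"]}
    prompt = build_prompt(state, "What is 6 * 7?")
    print([m.content for m in prompt])              # question + tool results
    print([m.content for m in state["messages"]])   # tool results persisted

The key design choice the commit makes is visible here: tool messages are written both to the outgoing prompt and to the persistent history, so the deduplication guard is the only thing preventing the question from being re-appended on every loop iteration.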