laverdes committed on
Commit
f1ede08
·
verified ·
1 Parent(s): bae187d

feat: generating final answer if specified

Browse files
Files changed (1) hide show
  1. basic_agent.py +41 -7
basic_agent.py CHANGED
@@ -40,6 +40,31 @@ def print_conversation(messages):
40
  console.print(panel)
41
 
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  class AgentState(TypedDict):
44
  messages: Annotated[Sequence[BaseMessage], add_messages]
45
 
@@ -104,7 +129,7 @@ class BasicOpenAIAgentWorkflow:
104
  self.graph = graph.compile()
105
 
106
 
107
- def chat(self, query, verbose=2):
108
  """Simple agent call"""
109
  if isinstance(query, dict):
110
  query = query["messages"]
@@ -121,16 +146,25 @@ class BasicOpenAIAgentWorkflow:
121
  if not self.history_messages:
122
  raise ValueError("Converted message history is empty. Something went wrong.")
123
 
124
- response = self.graph.invoke({'messages': self.history_messages}) # invoke with all the history
125
  response = response['messages'][-1].content
 
 
 
 
 
 
 
 
 
126
  assistant_message = {'role': 'assistant', 'content': response}
127
  self.history.append(assistant_message)
128
-
129
  if verbose==2:
130
  print_conversation(self.history)
131
  elif verbose==1:
132
  print_conversation([response])
133
-
134
  return response
135
 
136
 
@@ -141,12 +175,12 @@ class BasicOpenAIAgentWorkflow:
141
  return {'messages': self.history_messages}
142
 
143
 
144
- def chat_batch(self, queries=None):
145
  """Send several simple agent calls to the llm using the compiled graph"""
146
  if queries is None:
147
  queries = []
148
  for i, query in tqdm(enumerate(queries, start=1)):
149
  if i == len(queries):
150
- self.chat(query, verbose=2)
151
  else:
152
- self.chat(query, verbose=0)
 
40
  console.print(panel)
41
 
42
 
43
def generate_final_answer(qa: dict[str, str]) -> str:
    """Invoke gpt-4o-mini to distill a bare final answer from a query/response pair.

    Args:
        qa: Mapping expected to carry 'query', 'response', and 'metadata'
            describing the original user query and the agent's raw response.

    Returns:
        The extracted final answer as a plain string, suitable for exact
        string comparison (or 'File not found' when the response indicates
        a missing input file).
    """
    # temperature=0 keeps extraction deterministic for string comparison
    final_answer_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

    system_prompt = (
        "You will receive a JSON string containing a user's query, a response, and metadata. "
        "Extract and return only the final answer to the query as a plain string. "
        "Do not return anything else. "
        "Avoid any labels, prefixes, or explanation. "
        # Trailing space added: without it this sentence fused with the next one.
        "Return only the exact value that satisfies the query, suitable for string comparison. "
        "If the query is not answerable due to a missing file in the input and is reflected in the response, answer with 'File not found'. "
    )

    system_message = SystemMessage(content=system_prompt)
    messages = [
        system_message,
        HumanMessage(content=f'Generate the final answer for the following query:\n\n{json.dumps(qa)}')
    ]

    response = final_answer_llm.invoke(messages)

    return response.content
66
+
67
+
68
class AgentState(TypedDict):
    """State dict threaded through the compiled LangGraph workflow."""
    # Conversation history; the add_messages reducer appends new messages
    # to the existing sequence instead of replacing it.
    messages: Annotated[Sequence[BaseMessage], add_messages]
70
 
 
129
  self.graph = graph.compile()
130
 
131
 
132
+ def chat(self, query, verbose=2, only_final_answer=False):
133
  """Simple agent call"""
134
  if isinstance(query, dict):
135
  query = query["messages"]
 
146
  if not self.history_messages:
147
  raise ValueError("Converted message history is empty. Something went wrong.")
148
 
149
+ response = self.graph.invoke({'messages': self.history_messages}) # invoke with all the history to keep context (dummy mem)
150
  response = response['messages'][-1].content
151
+
152
+ if only_final_answer:
153
+ final_answer_content = {
154
+ 'query': query,
155
+ 'response': response,
156
+ 'metadata': {}
157
+ }
158
+ response = generate_final_answer(final_answer_content)
159
+
160
  assistant_message = {'role': 'assistant', 'content': response}
161
  self.history.append(assistant_message)
162
+
163
  if verbose==2:
164
  print_conversation(self.history)
165
  elif verbose==1:
166
  print_conversation([response])
167
+
168
  return response
169
 
170
 
 
175
  return {'messages': self.history_messages}
176
 
177
 
178
def chat_batch(self, queries=None, only_final_answer=False):
    """Send several simple agent calls to the llm using the compiled graph.

    Args:
        queries: Sequence of queries sent sequentially; defaults to an empty
            list (mutable default avoided via the None sentinel).
        only_final_answer: Forwarded to chat(); when True each response is
            post-processed into a bare final answer.
    """
    if queries is None:
        queries = []
    # total= lets tqdm render a real progress bar instead of a bare counter
    for i, query in tqdm(enumerate(queries, start=1), total=len(queries)):
        # Print the full conversation only after the last query has run
        verbosity = 2 if i == len(queries) else 0
        self.chat(query, verbose=verbosity, only_final_answer=only_final_answer)