AashitaK committed on
Commit caf13db · verified · 1 Parent(s): 23797db

Update utils/response_manager.py

Files changed (1)
  1. utils/response_manager.py +16 -68
utils/response_manager.py CHANGED
@@ -76,19 +76,16 @@ class ResponseManager:
         logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
         return meta_prompt
 
-    def create_response(self, query: str,
-                        model: Optional[str] = None,
-                        temperature: Optional[float] = None,
-                        max_output_tokens: Optional[int] = None,
-                        max_num_results: Optional[int] = None) -> str:
+    def generate_response(self, query: str, history: list) -> list:
         """
-        Create a response to a user query using the OpenAI API.
-        :param query: The user query to respond to.
-        :param model: The OpenAI model to use (default is "gpt-4o-mini").
-        :param temperature: The temperature for the response (default is 0).
-        :param max_output_tokens: The maximum number of output tokens (default is 800).
-        :param max_num_results: The maximum number of search results to return (default is 15).
-        :return: The response text from the OpenAI API.
+        Generate a response to a user query using the OpenAI API.
+        This method sends the user's query to the OpenAI API to create a response.
+        It uses the class-level defaults for model configuration and handles errors gracefully.
+        Args:
+            query (str): The user query to respond to.
+            history (list): The conversation history from the chatbot.
+        Returns:
+            list: A list of dictionaries representing the conversation, including the generated response.
         """
         # Prepare the input for the API call
         input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
@@ -99,80 +96,31 @@ class ResponseManager:
             logging.warning("Empty or invalid query received.")
             warning_message = "Please enter a valid query."
             input_data.append({"role": "assistant", "content": warning_message})
-            return input_data
-
-        # Set default values for optional parameters
-        model = model or self.DEFAULT_MODEL
-        temperature = temperature if temperature is not None else self.DEFAULT_TEMPERATURE
-        max_output_tokens = max_output_tokens if max_output_tokens is not None else self.DEFAULT_MAX_OUTPUT_TOKENS
-        max_num_results = max_num_results if max_num_results is not None else self.DEFAULT_MAX_NUM_RESULTS
+            return history + input_data
 
         try:
             logging.info("Sending request to OpenAI API...")
             response = self.client.responses.create(
-                model=model,
+                model=self.DEFAULT_MODEL,
                 previous_response_id=self.previous_response_id,
                 input=input_data,
                 tools=[{
                     "type": "file_search",
                     "vector_store_ids": [self.vector_store_id],
-                    "max_num_results": max_num_results
+                    "max_num_results": self.DEFAULT_MAX_NUM_RESULTS
                 }],
-                temperature=temperature,
-                max_output_tokens=max_output_tokens
+                temperature=self.DEFAULT_TEMPERATURE,
+                max_output_tokens=self.DEFAULT_MAX_OUTPUT_TOKENS
             )
             self.previous_response_id = response.id
             logging.info("Response received successfully.")
             input_data.append({"role": "assistant", "content": response.output_text})
-            return input_data
+            return history + input_data
 
         except Exception as e:
             logging.error(f"An error occurred while generating a response: {e}")
             error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
             input_data.append({"role": "assistant", "content": error_message})
-            return input_data
+            return history + input_data
 
-    def conversation(self, query: str, history: list,
-                     model: Optional[str] = None,
-                     temperature: Optional[float] = None,
-                     max_output_tokens: Optional[int] = None,
-                     max_num_results: Optional[int] = None
-                     ) -> list:
-        """
-        Function to handle the chatbot interaction and maintain conversation history.
-        :param query: The user query to respond to.
-        :param model: The OpenAI model to use (default is "gpt-4o-mini").
-        :param temperature: The temperature for the response (default is 0).
-        :param max_output_tokens: The maximum number of output tokens (default is 800).
-        :param max_num_results: The maximum number of search results to return (default is 15).
-        :param history: The conversation history (list of [input, output] pairs).
-        :return: Updated conversation history.
-        """
-        logging.info("Received query: %s", query)
-
-        try:
-            # Set default values for optional parameters
-            model = model or self.DEFAULT_MODEL
-            temperature = temperature if temperature is not None else self.DEFAULT_TEMPERATURE
-            max_output_tokens = max_output_tokens if max_output_tokens is not None else self.DEFAULT_MAX_OUTPUT_TOKENS
-            max_num_results = max_num_results if max_num_results is not None else self.DEFAULT_MAX_NUM_RESULTS
-
-            # Generate a response using the create_response method
-            logging.info("Generating response for the query...")
-            response = self.create_response(
-                query=query,
-                model=model,
-                temperature=temperature,
-                max_output_tokens=max_output_tokens,
-                max_num_results=max_num_results
-            )
-            # Append the user query and response to the conversation history
-            history += response
-            return history
-
-        except Exception as e:
-            # Log the error and append it to the conversation history
-            logging.error("An error occurred while generating a response: %s", str(e))
-            # history.append((query, f"An error occurred: {str(e)}"))
-            return history
 
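The refactored method still threads conversation state through previous_response_id, so each call sends only the new turn and the API recalls the earlier context server-side. A minimal sketch of that pattern, using only the official openai Python client (the model name and messages here are illustrative, not from this repo):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# First turn: nothing to chain from, so the developer instructions go
# in the input, just as generate_response seeds its meta prompt.
first = client.responses.create(
    model="gpt-4o-mini",
    input=[{"role": "developer", "content": "You are a concise assistant."},
           {"role": "user", "content": "Hello!"}],
)

# Follow-up turn: previous_response_id carries the conversation context,
# so only the new user message is sent.
follow_up = client.responses.create(
    model="gpt-4o-mini",
    previous_response_id=first.id,
    input=[{"role": "user", "content": "Repeat what I just said."}],
)
print(follow_up.output_text)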
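Because the new signature takes the query plus a messages-style history and returns the updated history, it drops straight into a chat-UI callback. A hypothetical driver loop, assuming a no-argument ResponseManager constructor (the real constructor and its vector-store wiring live elsewhere in the module):

from utils.response_manager import ResponseManager

manager = ResponseManager()  # constructor arguments assumed; see the module
history = []

for query in ["What topics are covered?", "Expand on the first one."]:
    # generate_response returns the prior history plus this turn's messages;
    # the assistant's reply is the last entry.
    history = manager.generate_response(query, history)
    print(f"Q: {query}\nA: {history[-1]['content']}\n")

Returning history + input_data rather than mutating history in place leaves the caller's list untouched, which is what makes reassigning it in a loop like this safe.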