AashitaK committed (verified)
Commit efcd67e · Parent(s): 7b7d6d2

Update utils/response_manager.py

Files changed (1):
  utils/response_manager.py (+24 −34)
utils/response_manager.py CHANGED
@@ -3,43 +3,29 @@ import openai
 import logging
 from typing import Optional
 
-
 class ResponseManager:
     """
     This class initializes the OpenAI client and provides methods to create responses,
     maintain conversation history, and handle user queries.
-    Attributes:
-        DEFAULT_META_PROMPT_FILE (str): Default path to the meta prompt file.
-        DEFAULT_MODEL (str): Default OpenAI model to use.
-        DEFAULT_TEMPERATURE (float): Default temperature for response generation.
-        DEFAULT_MAX_OUTPUT_TOKENS (int): Default maximum number of output tokens.
-        DEFAULT_MAX_NUM_RESULTS (int): Default maximum number of search results.
-    Methods:
-        __init__(vector_store_id: Optional[str], api_key: Optional[str], meta_prompt_file: Optional[str]):
-            Initializes the ResponseManager with a vector store ID, API key, and meta prompt file.
-        _load_meta_prompt(meta_prompt_file: str) -> str:
-            Loads the meta prompt from the specified file.
-        create_response(query: str, model: Optional[str], temperature: Optional[float],
-                        max_output_tokens: Optional[int], max_num_results: Optional[int]) -> str:
-            Creates a response to a user query using the OpenAI API.
-        conversation(query: str, model: Optional[str], temperature: Optional[float],
-                     max_output_tokens: Optional[int], max_num_results: Optional[int],
-            Handles chatbot interaction and maintains conversation history.
     """
 
-    DEFAULT_META_PROMPT_FILE = 'config/meta_prompt.txt'
-    DEFAULT_MODEL = "gpt-4o-mini"
-    DEFAULT_TEMPERATURE = 0
-    DEFAULT_MAX_OUTPUT_TOKENS = 800
-    DEFAULT_MAX_NUM_RESULTS = 15
-
-
-    def __init__(self, vector_store_id: Optional[str] = None, api_key: Optional[str] = None, meta_prompt_file: Optional[str] = None):
+    def __init__(self,
+                 vector_store_id: Optional[str] = None,
+                 api_key: Optional[str] = None,
+                 meta_prompt_file: Optional[str] = None,
+                 model: str = "gpt-4o-mini",
+                 temperature: float = 0,
+                 max_output_tokens: int = 800,
+                 max_num_results: int = 15):
         """
-        Initialize the ResponseManager with a vector store ID, API key, and meta prompt file.
+        Initialize the ResponseManager with optional parameters for configuration.
         :param vector_store_id: The ID of the vector store to use for file search.
         :param api_key: The OpenAI API key for authentication.
         :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
+        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
+        :param temperature: The temperature for response generation (default: 0).
+        :param max_output_tokens: The maximum number of output tokens (default: 800).
+        :param max_num_results: The maximum number of search results to return (default: 15).
         """
         # Load vector_store_id and api_key from environment variables if not provided
         self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
@@ -62,6 +48,12 @@ class ResponseManager:
         # Load the meta prompt from the specified file
         self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)
 
+        # Set default parameters for response generation
+        self.model = model
+        self.temperature = temperature
+        self.max_output_tokens = max_output_tokens
+        self.max_num_results = max_num_results
+
     def _load_meta_prompt(self, meta_prompt_file: str) -> str:
         """
         Load the meta prompt from the specified file.
@@ -101,16 +93,16 @@ class ResponseManager:
         try:
             logging.info("Sending request to OpenAI API...")
             response = self.client.responses.create(
-                model=self.DEFAULT_MODEL,
+                model=self.model,
                 previous_response_id=self.previous_response_id,
                 input=input_data,
                 tools=[{
                     "type": "file_search",
                     "vector_store_ids": [self.vector_store_id],
-                    "max_num_results": self.DEFAULT_MAX_NUM_RESULTS
+                    "max_num_results": self.max_num_results
                 }],
-                temperature=self.DEFAULT_TEMPERATURE,
-                max_output_tokens=self.DEFAULT_MAX_OUTPUT_TOKENS
+                temperature=self.temperature,
+                max_output_tokens=self.max_output_tokens
             )
             self.previous_response_id = response.id
             logging.info("Response received successfully.")
@@ -121,6 +113,4 @@ class ResponseManager:
             logging.error(f"An error occurred while generating a response: {e}")
             error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
             input_data.append({"role": "assistant", "content": error_message})
-        return history + input_data
-
-
+        return history + input_data
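
For readers following the refactor: the settings that used to live in the DEFAULT_* class constants are now plain keyword arguments on __init__, so each ResponseManager instance carries its own configuration. The sketch below is illustrative only, not part of the commit; it assumes the module is importable as utils.response_manager and that create_response() now takes just the query, reading model, temperature, and the other settings from the instance as the updated call to client.responses.create suggests.

    # Illustrative sketch only -- not part of the commit. Assumes the repo
    # layout (utils/response_manager.py) and that create_response() reads its
    # settings from the instance, as the updated method body suggests.
    from utils.response_manager import ResponseManager

    # Instance-level configuration replaces the old DEFAULT_* class constants.
    manager = ResponseManager(
        vector_store_id="vs_abc123",  # hypothetical ID; omit to fall back to
                                      # the VECTOR_STORE_ID environment variable
        model="gpt-4o-mini",          # default shown in the new signature
        temperature=0.2,              # per-instance override of the default 0
        max_output_tokens=500,
        max_num_results=10,
    )

    # A second instance can use different settings without touching shared
    # class state, which the old class-level constants made awkward.
    strict_manager = ResponseManager(temperature=0, max_num_results=15)

    answer = manager.create_response("What topics does the knowledge base cover?")
    print(answer)

Moving these knobs into __init__ also simplifies the request path: client.responses.create now reads self.model, self.max_num_results, self.temperature, and self.max_output_tokens instead of class-level defaults.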