AashitaK committed on
Commit
cc5168c
·
verified ·
1 Parent(s): 25c9800

Update utils/session_history.py

Browse files
Files changed (1) hide show
  1. utils/session_history.py +236 -237
utils/session_history.py CHANGED
@@ -6,245 +6,244 @@ import gradio as gr
6
  from typing import Optional
7
 
8
class ResponseManager:
    """
    Manages calls to the OpenAI Responses API for the chatbot.

    This class initializes the OpenAI client and provides methods to create
    responses, maintain conversation history, and handle user queries.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager with optional parameters for configuration.

        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        :raises ValueError: If no vector store ID or API key can be resolved.
        :raises FileNotFoundError: If the meta prompt file does not exist.
        """
        # Load vector_store_id and api_key from environment variables if not provided.
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        # ID of the most recent response; the Responses API uses it to chain
        # turns server-side, so the meta prompt is only sent on the first turn.
        self.previous_response_id = None

        # Initialize the OpenAI client and load the meta prompt.
        self.client = openai.OpenAI(api_key=self.api_key)
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        # Default parameters for response generation.
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string (stripped of surrounding whitespace).
        :raises FileNotFoundError: If the file does not exist.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

    def generate_response(self, query: str, history: list) -> tuple:
        """
        Generate a response to a user query using the OpenAI API.

        Handles errors gracefully: on API failure an apologetic assistant
        message is appended instead of raising.

        :param query: The user query to respond to.
        :param history: The conversation history from the chatbot (message dicts).
        :return: tuple (updated conversation for display, updated conversation for state).
        """
        # Validate the query BEFORE building any API input, so a blank
        # submission never consumes or records the one-shot meta prompt.
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            new_history = history + [
                {"role": "user", "content": query},
                {"role": "assistant", "content": "Please enter a valid query."},
            ]
            return new_history, new_history

        # The developer meta prompt is sent to the API only on the first turn
        # (later turns are chained via previous_response_id). It is kept OUT of
        # the returned history: gr.Chatbot(type="messages") renders only
        # user/assistant messages, so a developer-role entry must not be shown.
        input_data = []
        if self.previous_response_id is None:
            input_data.append({"role": "developer", "content": self.meta_prompt})
        input_data.append({"role": "user", "content": query})

        displayed = [{"role": "user", "content": query}]
        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            displayed.append({"role": "assistant", "content": response.output_text})
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            displayed.append({"role": "assistant", "content": error_message})
        new_history = history + displayed
        return new_history, new_history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
class ChatbotInterface:
    """
    Gradio front end for the chatbot.

    Loads GUI labels from a JSON config file and delegates response
    generation to a ResponseManager instance.
    """

    def __init__(self,
                 config_path: str = 'config/gradio_config.json',
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None):
        """
        Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.

        :param config_path: Path to the configuration JSON file.
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file.
        """
        self.config = self.load_config(config_path)
        self.title = self.config["chatbot_title"]
        self.description = self.config["chatbot_description"]
        self.input_label = self.config["chatbot_input_label"]
        self.input_placeholder = self.config["chatbot_input_placeholder"]
        self.output_label = self.config["chatbot_output_label"]
        self.reset_button = self.config["chatbot_reset_button"]
        self.submit_button = self.config["chatbot_submit_button"]

        # Initialize ResponseManager with custom parameters.
        try:
            self.response_manager = ResponseManager(
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_num_results=max_num_results,
                vector_store_id=vector_store_id,
                api_key=api_key,
                meta_prompt_file=meta_prompt_file
            )
            self.generate_response = self.response_manager.generate_response
            logging.info(
                "ChatbotInterface initialized with the following parameters:\n"
                f" - Model: {model}\n"
                f" - Temperature: {temperature}\n"
                f" - Max Output Tokens: {max_output_tokens}\n"
                f" - Max Number of Results: {max_num_results}\n"
                f" - Vector Store ID: {vector_store_id}\n"
                f" - API Key: {'Provided' if api_key else 'Not Provided'}\n"
                f" - Meta Prompt File: {meta_prompt_file or 'Default'}"
            )
        except Exception as e:
            logging.error(f"Failed to initialize ResponseManager: {e}")
            raise

    @staticmethod
    def load_config(config_path: str) -> dict:
        """
        Load the configuration for the Gradio GUI interface from a JSON file.

        :param config_path: Path to the configuration JSON file.
        :return: Configuration dictionary.
        :raises FileNotFoundError: If the config file does not exist.
        :raises ValueError: If a required key is missing from the config.
        """
        logging.info(f"Loading configuration from {config_path}...")
        if not os.path.exists(config_path):
            logging.error(f"Configuration file not found: {config_path}")
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        with open(config_path, 'r') as config_file:
            config = json.load(config_file)

        required_keys = [
            "chatbot_title", "chatbot_description", "chatbot_input_label",
            "chatbot_input_placeholder", "chatbot_output_label",
            "chatbot_reset_button", "chatbot_submit_button"
        ]
        for key in required_keys:
            if key not in config:
                logging.error(f"Missing required configuration key: {key}")
                raise ValueError(f"Missing required configuration key: {key}")

        logging.info("Configuration loaded successfully.")
        return config

    def reset_output(self) -> list:
        """
        Reset the chatbot output.

        Kept for backward compatibility; the reset button is wired to
        reset_session so the stored session state is cleared as well.
        :return: An empty list to reset the output.
        """
        return []

    def reset_session(self) -> tuple:
        """
        Fully reset the conversation: the displayed chat, the session state,
        and the server-side conversation chain.

        :return: (empty display history, empty state history)
        """
        # Without this, the next reply would continue the old API conversation.
        self.response_manager.previous_response_id = None
        return [], []

    def create_interface(self) -> "gr.Blocks":
        """
        Create the Gradio Blocks interface.

        :return: A Gradio Blocks interface object.
        """
        logging.info("Creating Gradio interface...")

        # Define the Gradio Blocks interface.
        with gr.Blocks() as demo:
            gr.Markdown(f"## {self.title}\n{self.description}")

            # Chatbot history component.
            chatbot_output = gr.Chatbot(label=self.output_label, type="messages")

            # Session-specific state holding the conversation history.
            conversation_state = gr.State([])

            # User input
            user_input = gr.Textbox(
                lines=2,
                label=self.input_label,
                placeholder=self.input_placeholder
            )

            # Buttons
            with gr.Row():
                reset = gr.Button(self.reset_button, variant="secondary")
                submit = gr.Button(self.submit_button, variant="primary")

            submit.click(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
            user_input.submit(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
            # Reset must clear BOTH the visible chat and the stored state;
            # clearing only the display would leave stale history that the
            # next submit re-appends to.
            reset.click(fn=self.reset_session, inputs=None, outputs=[chatbot_output, conversation_state])

        logging.info("Gradio interface created successfully.")
        return demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  from typing import Optional
7
 
8
class ResponseManager:
    """
    Thin wrapper around the OpenAI Responses API.

    Creates the OpenAI client, loads a developer "meta prompt" from disk, and
    exposes generate_response(), which appends each exchange to the running
    conversation history.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Set up the manager.

        :param vector_store_id: Vector store ID for file search (falls back to
            the VECTOR_STORE_ID environment variable).
        :param api_key: OpenAI API key (falls back to OPENAI_API_KEY).
        :param meta_prompt_file: Meta prompt path; defaults to 'config/meta_prompt.txt'.
        :param model: Model name used for every request.
        :param temperature: Sampling temperature.
        :param max_output_tokens: Cap on generated tokens per response.
        :param max_num_results: Cap on file-search results per request.
        :raises ValueError: if no vector store ID or API key can be resolved.
        """
        # Explicit arguments win; otherwise fall back to the environment.
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        # No previous response yet: the next call is the first turn.
        self.previous_response_id = None

        self.client = openai.OpenAI(api_key=self.api_key)
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Read and return the meta prompt text from *meta_prompt_file*.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: File contents with surrounding whitespace stripped.
        :raises FileNotFoundError: when the file is absent.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as handle:
            prompt_text = handle.read().strip()
            logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return prompt_text

    def generate_response(self, query: str, history: list) -> tuple:
        """
        Answer *query* via the OpenAI API and extend the conversation.

        On the first turn the developer meta prompt is prepended to the API
        input; later turns rely on previous_response_id for server-side
        context. The updated history is returned twice: once for the Chatbot
        display and once for the session state.

        :param query: The user query to respond to.
        :param history: Existing conversation (list of message dicts).
        :return: (display history, state history) — the same updated list.
        """
        turn = ([{"role": "developer", "content": self.meta_prompt}]
                if self.previous_response_id is None else [])
        turn.append({"role": "user", "content": query})

        # Blank input short-circuits with a canned assistant warning.
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            turn.append({"role": "assistant", "content": "Please enter a valid query."})
            updated = history + turn
            return updated, updated

        try:
            logging.info("Sending request to OpenAI API...")
            result = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=turn,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results,
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens,
            )
            # Remember the response ID so the next turn continues this thread.
            self.previous_response_id = result.id
            logging.info("Response received successfully.")
            reply = result.output_text
        except Exception as exc:
            logging.error(f"An error occurred while generating a response: {exc}")
            reply = "Sorry, I couldn't generate a response at this time. Please try again later."
        turn.append({"role": "assistant", "content": reply})
        updated = history + turn
        return updated, updated
121
 
122
class ChatbotInterface:
    """
    Builds the Gradio front end and connects it to a ResponseManager.
    """

    def __init__(self,
                 config_path: str = 'config/gradio_config.json',
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None):
        """
        Build the interface wrapper.

        :param config_path: JSON file holding the GUI labels.
        :param model: OpenAI model name forwarded to ResponseManager.
        :param temperature: Sampling temperature forwarded to ResponseManager.
        :param max_output_tokens: Output-token cap forwarded to ResponseManager.
        :param max_num_results: File-search result cap forwarded to ResponseManager.
        :param vector_store_id: Vector store ID forwarded to ResponseManager.
        :param api_key: OpenAI API key forwarded to ResponseManager.
        :param meta_prompt_file: Meta prompt path forwarded to ResponseManager.
        """
        cfg = self.load_config(config_path)
        self.config = cfg
        self.title = cfg["chatbot_title"]
        self.description = cfg["chatbot_description"]
        self.input_label = cfg["chatbot_input_label"]
        self.input_placeholder = cfg["chatbot_input_placeholder"]
        self.output_label = cfg["chatbot_output_label"]
        self.reset_button = cfg["chatbot_reset_button"]
        self.submit_button = cfg["chatbot_submit_button"]

        # Build the backing ResponseManager; any failure is logged and re-raised.
        try:
            self.response_manager = ResponseManager(
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_num_results=max_num_results,
                vector_store_id=vector_store_id,
                api_key=api_key,
                meta_prompt_file=meta_prompt_file
            )
            self.generate_response = self.response_manager.generate_response
            logging.info(
                "ChatbotInterface initialized with the following parameters:\n"
                f" - Model: {model}\n"
                f" - Temperature: {temperature}\n"
                f" - Max Output Tokens: {max_output_tokens}\n"
                f" - Max Number of Results: {max_num_results}\n"
                f" - Vector Store ID: {vector_store_id}\n"
                f" - API Key: {'Provided' if api_key else 'Not Provided'}\n"
                f" - Meta Prompt File: {meta_prompt_file or 'Default'}"
            )
        except Exception as e:
            logging.error(f"Failed to initialize ResponseManager: {e}")
            raise

    @staticmethod
    def load_config(config_path: str) -> dict:
        """
        Parse the GUI configuration JSON and validate its keys.

        :param config_path: Path to the configuration JSON file.
        :return: The parsed configuration dictionary.
        :raises FileNotFoundError: when the file is absent.
        :raises ValueError: when a required key is missing.
        """
        logging.info(f"Loading configuration from {config_path}...")
        if not os.path.exists(config_path):
            logging.error(f"Configuration file not found: {config_path}")
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        with open(config_path, 'r') as fh:
            settings = json.load(fh)

        for key in ("chatbot_title", "chatbot_description", "chatbot_input_label",
                    "chatbot_input_placeholder", "chatbot_output_label",
                    "chatbot_reset_button", "chatbot_submit_button"):
            if key not in settings:
                logging.error(f"Missing required configuration key: {key}")
                raise ValueError(f"Missing required configuration key: {key}")

        logging.info("Configuration loaded successfully.")
        return settings

    def reset_output(self) -> list:
        """
        Reset the chatbot output.

        :return: An empty list, clearing the chatbot display.
        """
        return []

    def create_interface(self) -> "gr.Blocks":
        """
        Assemble and return the Gradio Blocks UI.

        :return: A Gradio Blocks interface object.
        """
        logging.info("Creating Gradio interface...")

        with gr.Blocks() as ui:
            gr.Markdown(f"## {self.title}\n{self.description}")

            # Rendered chat transcript.
            chat_display = gr.Chatbot(label=self.output_label, type="messages")
            # Per-session conversation history.
            session_state = gr.State([])

            # Query entry box.
            query_box = gr.Textbox(
                lines=2,
                label=self.input_label,
                placeholder=self.input_placeholder
            )

            with gr.Row():
                reset_btn = gr.Button(self.reset_button, variant="secondary")
                submit_btn = gr.Button(self.submit_button, variant="primary")

            # Both the button click and Enter in the textbox submit a query.
            for trigger in (submit_btn.click, query_box.submit):
                trigger(
                    fn=self.generate_response,
                    inputs=[query_box, session_state],
                    outputs=[chat_display, session_state],
                )
            reset_btn.click(fn=self.reset_output, inputs=None, outputs=chat_display)

        logging.info("Gradio interface created successfully.")
        return ui