AashitaK commited on
Commit
2b526c1
·
verified ·
1 Parent(s): bb23a87

Delete utils/session_history2.py

Browse files
Files changed (1) hide show
  1. utils/session_history2.py +0 -297
utils/session_history2.py DELETED
@@ -1,297 +0,0 @@
1
- import os
2
- import openai
3
- import logging
4
- from typing import Optional
5
-
6
class ResponseManager:
    """
    Thin wrapper around the OpenAI Responses API for a file-search chatbot.

    Responsibilities:
      * validate/resolve credentials and the vector store id,
      * load a "meta prompt" (system/developer instructions) from disk,
      * keep the server-side conversation thread alive via
        ``previous_response_id``,
      * turn a user query plus chat history into an updated chat history.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager.

        :param vector_store_id: Vector store ID for file search; falls back to
            the ``VECTOR_STORE_ID`` environment variable.
        :param api_key: OpenAI API key; falls back to ``OPENAI_API_KEY``.
        :param meta_prompt_file: Path to the meta prompt file
            (default: 'config/meta_prompt.txt').
        :param model: OpenAI model name (default: 'gpt-4o-mini').
        :param temperature: Sampling temperature (default: 0).
        :param max_output_tokens: Output token cap per response (default: 800).
        :param max_num_results: File-search result cap (default: 15).
        :raises ValueError: if the vector store id or API key cannot be resolved.
        :raises FileNotFoundError: if the meta prompt file does not exist.
        """
        # Resolve required settings, preferring explicit arguments over env vars.
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        # None means "no thread yet"; the Responses API chains turns via this id.
        self.previous_response_id = None

        # Initialize the OpenAI client.
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the developer/system instructions sent on the first turn.
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        # Generation parameters.
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def reset_conversation(self):
        """
        Forget the server-side conversation thread so the next call to
        :meth:`generate_response` starts a fresh one (and re-sends the
        meta prompt).
        """
        self.previous_response_id = None

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load and return the meta prompt text from *meta_prompt_file*.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The file contents with surrounding whitespace stripped.
        :raises FileNotFoundError: if the file does not exist.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

    def generate_response(self, query: str, history: list) -> list:
        """
        Generate a response to *query* and return the extended chat history.

        On the first turn of a thread the meta prompt is sent as a
        ``developer`` message; subsequent turns rely on
        ``previous_response_id`` so only the new user message is sent.
        API errors are caught and surfaced as an assistant-role apology so
        the UI never crashes.

        :param query: The user query to respond to.
        :param history: The conversation history from the chatbot
            (list of ``{"role": ..., "content": ...}`` dicts).
        :return: ``history`` extended with the new user/assistant messages.
        """
        # Bug fix: validate BEFORE building the API input. The original built
        # input_data first, so an empty first-turn query leaked the
        # developer-role meta prompt into the returned (displayed) history;
        # it also raised AttributeError when query was None.
        if not query or not query.strip():
            logging.warning("Empty or invalid query received.")
            warning_message = "Please enter a valid query."
            return history + [
                {"role": "user", "content": query},
                {"role": "assistant", "content": warning_message},
            ]

        # Send the meta prompt only when starting a new thread.
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})

        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            # Remember the thread id so the next turn continues it.
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            return history + input_data

        except Exception as e:
            # Best-effort UX: log the failure and show a friendly message
            # instead of propagating the exception into the UI.
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            return history + input_data
124
-
125
- import os
126
- import json
127
- import logging
128
- from typing import Optional
129
- import gradio as gr
130
- # from utils.response_manager import ResponseManager
131
-
132
- class ChatbotInterface:
133
- def __init__(self,
134
- config_path: str = 'config/gradio_config.json',
135
- model: str = "gpt-4o-mini",
136
- temperature: float = 0,
137
- max_output_tokens: int = 800,
138
- max_num_results: int = 15,
139
- vector_store_id: Optional[str] = None,
140
- api_key: Optional[str] = None,
141
- meta_prompt_file: Optional[str] = None):
142
- """
143
- Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.
144
- :param config_path: Path to the configuration JSON file.
145
- :param model: The OpenAI model to use (default: 'gpt-4o-mini').
146
- :param temperature: The temperature for response generation (default: 0).
147
- :param max_output_tokens: The maximum number of output tokens (default: 800).
148
- :param max_num_results: The maximum number of search results to return (default: 15).
149
- :param vector_store_id: The ID of the vector store to use for file search.
150
- :param api_key: The OpenAI API key for authentication.
151
- :param meta_prompt_file: Path to the meta prompt file .
152
- """
153
- self.config = self.load_config(config_path)
154
- self.title = self.config["chatbot_title"]
155
- self.description = self.config["chatbot_description"]
156
- self.input_placeholder = self.config["chatbot_input_placeholder"]
157
- self.output_label = self.config["chatbot_output_label"]
158
-
159
- # Parameters for ResponseManager class
160
- self.model = model
161
- self.temperature = temperature
162
- self.max_output_tokens = max_output_tokens
163
- self.max_num_results = max_num_results
164
- self.vector_store_id = vector_store_id
165
- self.api_key = api_key
166
- self.meta_prompt_file = meta_prompt_file
167
-
168
-
169
- @staticmethod
170
- def load_config(config_path: str) -> dict:
171
- """
172
- Load the configuration for Gradio GUI interface from the JSON file.
173
- :param config_path: Path to the configuration JSON file.
174
- :return: Configuration dictionary.
175
- """
176
- logging.info(f"Loading configuration from {config_path}...")
177
- if not os.path.exists(config_path):
178
- logging.error(f"Configuration file not found: {config_path}")
179
- raise FileNotFoundError(f"Configuration file not found: {config_path}")
180
-
181
- with open(config_path, 'r') as config_file:
182
- config = json.load(config_file)
183
-
184
- required_keys = [
185
- "chatbot_title",
186
- "chatbot_description",
187
- "chatbot_input_placeholder",
188
- "chatbot_output_label"
189
- ]
190
-
191
- for key in required_keys:
192
- if key not in config:
193
- logging.error(f"Missing required configuration key: {key}")
194
- raise ValueError(f"Missing required configuration key: {key}")
195
-
196
- logging.info("Configuration loaded successfully.")
197
- return config
198
-
199
-
200
- def create_interface(self) -> gr.Blocks:
201
- """
202
- Create the Gradio Blocks interface that displays a single container including both
203
- the text input and a small arrow submit button. The interface will clear the text input
204
- after each message is submitted.
205
- """
206
- logging.info("Creating Gradio interface...")
207
-
208
- with gr.Blocks() as demo:
209
- # Title and description area.
210
- gr.Markdown(f"## {self.title}\n{self.description}")
211
-
212
- # Chatbot output area.
213
- chatbot_output = gr.Chatbot(label=self.output_label, type="messages")
214
-
215
- # # Session-specific state to store conversation history.
216
- # conversation_state = gr.State([])
217
-
218
- # Session-specific states
219
- conversation_state = gr.State([])
220
- response_manager_state = gr.State(None)
221
-
222
- # Use a gr.Row container as the input box with an integrated submit button.
223
- with gr.Row(elem_id="input-container", equal_height=True):
224
- user_input = gr.Textbox(
225
- lines=1,
226
- show_label=False, # Hide label for a unified look.
227
- elem_id="chat-input",
228
- placeholder=self.input_placeholder,
229
- scale=500,
230
- )
231
- reset = gr.ClearButton(
232
- value="Reset 🔄",
233
- variant="secondary",
234
- elem_id="reset-button",
235
- size="lg"
236
- )
237
-
238
- # 🟢 Initialization function for session-specific response manager
239
- def init_response_manager():
240
- try:
241
- rm = ResponseManager(
242
- model=self.model,
243
- temperature=self.temperature,
244
- max_output_tokens=self.max_output_tokens,
245
- max_num_results=self.max_num_results,
246
- vector_store_id=self.vector_store_id,
247
- api_key=self.api_key,
248
- meta_prompt_file=self.meta_prompt_file
249
- )
250
-
251
- logging.info(
252
- "ChatbotInterface initialized with the following parameters:\n"
253
- f" - Model: {self.model}\n"
254
- f" - Temperature: {self.temperature}\n"
255
- f" - Max Output Tokens: {self.max_output_tokens}\n"
256
- f" - Max Number of Results: {self.max_num_results}\n"
257
- )
258
-
259
- rm.reset_conversation()
260
- return rm
261
- except Exception as e:
262
- logging.error(f"Failed to initialize ResponseManager: {e}")
263
- raise
264
-
265
- # 🟢 Reset function updated to reset ResponseManager
266
- def reset_output():
267
- response_manager = init_response_manager()
268
- return [], response_manager, ""
269
-
270
- # 🟢 Process input now uses session-specific ResponseManager
271
- def process_input(user_message, chat_history, response_manager):
272
- updated_history = response_manager.generate_response(user_message, chat_history)
273
- return updated_history, updated_history, response_manager, ""
274
-
275
- # Initialize ResponseManager on load
276
- demo.load(
277
- fn=init_response_manager,
278
- inputs=None,
279
- outputs=response_manager_state
280
- )
281
-
282
-
283
-
284
- reset.click(
285
- fn=reset_output,
286
- inputs=None,
287
- outputs=[chatbot_output, response_manager_state, user_input]
288
- )
289
-
290
- user_input.submit(
291
- fn=process_input,
292
- inputs=[user_input, conversation_state, response_manager_state],
293
- outputs=[chatbot_output, conversation_state, response_manager_state, user_input]
294
- )
295
-
296
- logging.info("Gradio interface created successfully.")
297
- return demo