Create session_history.py
utils/session_history.py (ADDED, +250 -0)
import os
import openai
import logging
import json
import gradio as gr
from typing import Optional

class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses,
    maintain conversation history, and handle user queries.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager with optional parameters for configuration.
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        self.previous_response_id = None

        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        # Set default parameters for response generation
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.
        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

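For reference, _load_meta_prompt only expects a plain-text file at the configured path; the file itself is not part of this commit. A minimal sketch for local testing, where the prompt wording below is a placeholder and not the project's actual meta prompt:

# Illustrative only: create a placeholder config/meta_prompt.txt for local testing.
from pathlib import Path

Path("config").mkdir(exist_ok=True)
Path("config/meta_prompt.txt").write_text(
    "You are a helpful assistant. Answer using the documents retrieved "
    "via file search, and say so when the answer is not in them.\n",
    encoding="utf-8",
)
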
    def generate_response(self, query: str, history: list) -> tuple:
        """
        Generate a response to a user query using the OpenAI API.
        This method interacts with the OpenAI API to create a response based on the user's query.
        It supports optional parameters for model configuration and handles errors gracefully.
        Args:
            query (str): The user query to respond to.
            history (list): The conversation history from the chatbot.
        Returns:
            tuple: (updated conversation list for display, updated conversation list for state)
        """
        # Prepare the input for the API call. The developer/meta prompt is sent only on
        # the first turn; later turns are threaded server-side via previous_response_id.
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})

        # Validate the query
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            warning_message = "Please enter a valid query."
            input_data.append({"role": "assistant", "content": warning_message})
            # Keep the developer/meta prompt out of the visible chat history;
            # gr.Chatbot in "messages" mode only renders user/assistant turns.
            new_history = history + [m for m in input_data if m["role"] != "developer"]
            return new_history, new_history

        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            new_history = history + [m for m in input_data if m["role"] != "developer"]
            return new_history, new_history
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            new_history = history + [m for m in input_data if m["role"] != "developer"]
            return new_history, new_history

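ResponseManager can be exercised on its own, outside Gradio. A minimal usage sketch, assuming VECTOR_STORE_ID and OPENAI_API_KEY are exported and config/meta_prompt.txt exists; the queries are placeholders. Only the first call includes the developer/meta prompt in the API input; the second is threaded through previous_response_id:

# Sketch, assuming VECTOR_STORE_ID and OPENAI_API_KEY are set in the environment
# and config/meta_prompt.txt exists; the queries are placeholders.
import logging

logging.basicConfig(level=logging.INFO)

manager = ResponseManager(model="gpt-4o-mini", max_num_results=5)

history = []
# First turn: the developer/meta prompt is included in the API input.
display, history = manager.generate_response("What topics do the indexed documents cover?", history)
# Second turn: only the new user message is sent; context comes from previous_response_id.
display, history = manager.generate_response("Summarize the first topic in two sentences.", history)

for message in display:
    print(f'{message["role"]}: {message["content"]}')
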
class ChatbotInterface:
    def __init__(self,
                 config_path: str = 'config/gradio_config.json',
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None):
        """
        Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.
        :param config_path: Path to the configuration JSON file.
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file.
        """
        self.config = self.load_config(config_path)
        self.title = self.config["chatbot_title"]
        self.description = self.config["chatbot_description"]
        self.input_label = self.config["chatbot_input_label"]
        self.input_placeholder = self.config["chatbot_input_placeholder"]
        self.output_label = self.config["chatbot_output_label"]
        self.reset_button = self.config["chatbot_reset_button"]
        self.submit_button = self.config["chatbot_submit_button"]

        # Initialize ResponseManager with custom parameters
        try:
            self.response_manager = ResponseManager(
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_num_results=max_num_results,
                vector_store_id=vector_store_id,
                api_key=api_key,
                meta_prompt_file=meta_prompt_file
            )
            self.generate_response = self.response_manager.generate_response
            logging.info(
                "ChatbotInterface initialized with the following parameters:\n"
                f" - Model: {model}\n"
                f" - Temperature: {temperature}\n"
                f" - Max Output Tokens: {max_output_tokens}\n"
                f" - Max Number of Results: {max_num_results}\n"
                f" - Vector Store ID: {vector_store_id}\n"
                f" - API Key: {'Provided' if api_key else 'Not Provided'}\n"
                f" - Meta Prompt File: {meta_prompt_file or 'Default'}"
            )
        except Exception as e:
            logging.error(f"Failed to initialize ResponseManager: {e}")
            raise

    @staticmethod
    def load_config(config_path: str) -> dict:
        """
        Load the configuration for the Gradio GUI from the JSON file.
        :param config_path: Path to the configuration JSON file.
        :return: Configuration dictionary.
        """
        logging.info(f"Loading configuration from {config_path}...")
        if not os.path.exists(config_path):
            logging.error(f"Configuration file not found: {config_path}")
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        with open(config_path, 'r', encoding='utf-8') as config_file:
            config = json.load(config_file)

        required_keys = [
            "chatbot_title", "chatbot_description", "chatbot_input_label",
            "chatbot_input_placeholder", "chatbot_output_label",
            "chatbot_reset_button", "chatbot_submit_button"
        ]
        for key in required_keys:
            if key not in config:
                logging.error(f"Missing required configuration key: {key}")
                raise ValueError(f"Missing required configuration key: {key}")

        logging.info("Configuration loaded successfully.")
        return config

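The config file itself is not part of this commit; load_config only requires the seven keys listed above. A minimal sketch of what config/gradio_config.json might contain, written from Python so the key names stay in sync with required_keys; all string values are placeholders, not the project's actual UI text:

# Sketch: write a placeholder config/gradio_config.json with the required keys.
import json
from pathlib import Path

placeholder_config = {
    "chatbot_title": "Document Q&A",
    "chatbot_description": "Ask questions about the indexed documents.",
    "chatbot_input_label": "Your question",
    "chatbot_input_placeholder": "Type a question and press Enter...",
    "chatbot_output_label": "Conversation",
    "chatbot_reset_button": "Reset",
    "chatbot_submit_button": "Submit",
}

Path("config").mkdir(exist_ok=True)
Path("config/gradio_config.json").write_text(
    json.dumps(placeholder_config, indent=2), encoding="utf-8"
)
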
    def reset_output(self) -> tuple:
        """
        Reset the chatbot display and the per-session conversation state,
        and start a new response thread.
        :return: (empty chat history, empty conversation state)
        """
        self.response_manager.previous_response_id = None
        return [], []

    def create_interface(self) -> gr.Blocks:
        """
        Create the Gradio Blocks interface.
        :return: A Gradio Blocks interface object.
        """
        logging.info("Creating Gradio interface...")

        # Define the Gradio Blocks interface
        with gr.Blocks() as demo:
            gr.Markdown(f"## {self.title}\n{self.description}")

            # Chatbot history component
            chatbot_output = gr.Chatbot(label=self.output_label, type="messages")

            # Adding a session-specific state to store conversation history.
            conversation_state = gr.State([])

            # User input
            user_input = gr.Textbox(
                lines=2,
                label=self.input_label,
                placeholder=self.input_placeholder
            )

            # Buttons
            with gr.Row():
                reset = gr.Button(self.reset_button, variant="secondary")
                submit = gr.Button(self.submit_button, variant="primary")

            submit.click(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
            user_input.submit(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
            # Reset clears both the visible chat and the stored conversation state.
            reset.click(fn=self.reset_output, inputs=None, outputs=[chatbot_output, conversation_state])

        logging.info("Gradio interface created successfully.")
        return demo
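Nothing in this module launches the app. A typical entry point, for example an app.py at the repository root, would look roughly like the sketch below; the import path, config locations, and launch options are assumptions, not part of this commit:

# Sketch of an entry point, assuming this file lives at utils/session_history.py
# and the config files sketched above exist; launch options are illustrative.
import logging

from utils.session_history import ChatbotInterface

logging.basicConfig(level=logging.INFO)

if __name__ == "__main__":
    chatbot = ChatbotInterface(
        config_path="config/gradio_config.json",
        model="gpt-4o-mini",
    )
    demo = chatbot.create_interface()
    demo.launch()  # e.g. demo.launch(server_name="0.0.0.0") on a hosted Space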