import os
import openai
import logging
import json
import gradio as gr
from typing import Optional
class ResponseManager:
"""
This class initializes the OpenAI client and provides methods to create responses,
maintain conversation history, and handle user queries.
"""
def __init__(self,
vector_store_id: Optional[str] = None,
api_key: Optional[str] = None,
meta_prompt_file: Optional[str] = None,
model: str = "gpt-4o-mini",
temperature: float = 0,
max_output_tokens: int = 800,
max_num_results: int = 15):
"""
Initialize the ResponseManager with optional parameters for configuration.
:param vector_store_id: The ID of the vector store to use for file search.
:param api_key: The OpenAI API key for authentication.
:param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
:param model: The OpenAI model to use (default: 'gpt-4o-mini').
:param temperature: The temperature for response generation (default: 0).
:param max_output_tokens: The maximum number of output tokens (default: 800).
:param max_num_results: The maximum number of search results to return (default: 15).
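
        Example (a minimal sketch; the vector store ID below is a made-up
        placeholder, not a real ID):
            manager = ResponseManager(vector_store_id="vs_example_123")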
"""
# Load vector_store_id and api_key from environment variables if not provided
self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
if not self.vector_store_id:
logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
raise ValueError("VECTOR_STORE_ID is required.")
self.api_key = api_key or os.getenv('OPENAI_API_KEY')
if not self.api_key:
logging.error("OPENAI_API_KEY is not provided or set in the environment.")
raise ValueError("OPENAI_API_KEY is required.")
# Initialize other attributes
self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
self.previous_response_id = None
# Initialize the OpenAI client
self.client = openai.OpenAI(api_key=self.api_key)
# Load the meta prompt from the specified file
self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)
# Set default parameters for response generation
self.model = model
self.temperature = temperature
self.max_output_tokens = max_output_tokens
self.max_num_results = max_num_results
def _load_meta_prompt(self, meta_prompt_file: str) -> str:
"""
Load the meta prompt from the specified file.
:param meta_prompt_file: Path to the meta prompt file.
:return: The meta prompt as a string.
"""
if not os.path.exists(meta_prompt_file):
logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
with open(meta_prompt_file, 'r', encoding='utf-8') as file:
meta_prompt = file.read().strip()
logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
return meta_prompt
    def generate_response(self, query: str, history: list) -> tuple:
        """
        Generate a response to a user query via the OpenAI Responses API,
        using file search over the configured vector store and chaining
        turns through previous_response_id. Errors are caught and surfaced
        to the user as an apology message.
        :param query: The user query to respond to.
        :param history: The conversation history from the chatbot.
        :return: A tuple (updated history for display, updated history for state).
        """
        # Validate the query before building any API input
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            new_history = history + [{"role": "assistant", "content": "Please enter a valid query."}]
            return new_history, new_history
        # Prepend the meta prompt as a developer message on the first turn only;
        # later turns rely on previous_response_id, which lets the Responses API
        # carry the conversation state server-side.
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})
        # Track the visible chat turn separately so the developer meta prompt
        # never leaks into the Gradio chat history.
        turn = [{"role": "user", "content": query}]
        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            turn.append({"role": "assistant", "content": response.output_text})
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            turn.append({"role": "assistant", "content": "Sorry, I couldn't generate a response at this time. Please try again later."})
        new_history = history + turn
        return new_history, new_history
class ChatbotInterface:
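    """
    Gradio front end for ResponseManager: loads the UI text from a JSON
    config file and wires the chat components to generate_response.
    """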
    def __init__(self,
config_path: str = 'config/gradio_config.json',
model: str = "gpt-4o-mini",
temperature: float = 0,
max_output_tokens: int = 800,
max_num_results: int = 15,
vector_store_id: Optional[str] = None,
api_key: Optional[str] = None,
meta_prompt_file: Optional[str] = None):
"""
Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.
:param config_path: Path to the configuration JSON file.
:param model: The OpenAI model to use (default: 'gpt-4o-mini').
:param temperature: The temperature for response generation (default: 0).
:param max_output_tokens: The maximum number of output tokens (default: 800).
:param max_num_results: The maximum number of search results to return (default: 15).
:param vector_store_id: The ID of the vector store to use for file search.
:param api_key: The OpenAI API key for authentication.
:param meta_prompt_file: Path to the meta prompt file.
"""
self.config = self.load_config(config_path)
self.title = self.config["chatbot_title"]
self.description = self.config["chatbot_description"]
self.input_label = self.config["chatbot_input_label"]
self.input_placeholder = self.config["chatbot_input_placeholder"]
self.output_label = self.config["chatbot_output_label"]
self.reset_button = self.config["chatbot_reset_button"]
self.submit_button = self.config["chatbot_submit_button"]
# Initialize ResponseManager with custom parameters
try:
self.response_manager = ResponseManager(
model=model,
temperature=temperature,
max_output_tokens=max_output_tokens,
max_num_results=max_num_results,
vector_store_id=vector_store_id,
api_key=api_key,
meta_prompt_file=meta_prompt_file
)
self.generate_response = self.response_manager.generate_response
logging.info(
"ChatbotInterface initialized with the following parameters:\n"
f" - Model: {model}\n"
f" - Temperature: {temperature}\n"
f" - Max Output Tokens: {max_output_tokens}\n"
f" - Max Number of Results: {max_num_results}\n"
f" - Vector Store ID: {vector_store_id}\n"
f" - API Key: {'Provided' if api_key else 'Not Provided'}\n"
f" - Meta Prompt File: {meta_prompt_file or 'Default'}"
)
except Exception as e:
logging.error(f"Failed to initialize ResponseManager: {e}")
raise
@staticmethod
def load_config(config_path: str) -> dict:
"""
        Load the configuration for the Gradio GUI interface from a JSON file.
:param config_path: Path to the configuration JSON file.
:return: Configuration dictionary.
"""
logging.info(f"Loading configuration from {config_path}...")
if not os.path.exists(config_path):
logging.error(f"Configuration file not found: {config_path}")
raise FileNotFoundError(f"Configuration file not found: {config_path}")
with open(config_path, 'r') as config_file:
config = json.load(config_file)
required_keys = [
"chatbot_title", "chatbot_description", "chatbot_input_label",
"chatbot_input_placeholder", "chatbot_output_label",
"chatbot_reset_button", "chatbot_submit_button"
]
for key in required_keys:
if key not in config:
logging.error(f"Missing required configuration key: {key}")
raise ValueError(f"Missing required configuration key: {key}")
logging.info("Configuration loaded successfully.")
return config
    def reset_output(self) -> tuple:
        """
        Reset the chat display, the stored conversation state, and the
        server-side response chain.
        :return: Empty lists for the chatbot output and the conversation state.
        """
        self.response_manager.previous_response_id = None
        return [], []
def create_interface(self) -> gr.Blocks:
"""
Create the Gradio Blocks interface.
:return: A Gradio Blocks interface object.
"""
logging.info("Creating Gradio interface...")
# Define the Gradio Blocks interface
with gr.Blocks() as demo:
gr.Markdown(f"## {self.title}\n{self.description}")
# Chatbot history component
chatbot_output = gr.Chatbot(label=self.output_label, type="messages")
# Adding a session-specific state to store conversation history.
conversation_state = gr.State([])
# User input
user_input = gr.Textbox(
lines=2,
label=self.input_label,
placeholder=self.input_placeholder
)
# Buttons
with gr.Row():
reset = gr.Button(self.reset_button, variant="secondary")
submit = gr.Button(self.submit_button, variant="primary")
submit.click(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
user_input.submit(fn=self.generate_response, inputs=[user_input, conversation_state], outputs=[chatbot_output, conversation_state])
            # Clear both the visible chat and the stored state on reset
            reset.click(fn=self.reset_output, inputs=None, outputs=[chatbot_output, conversation_state])
logging.info("Gradio interface created successfully.")
        return demo
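
# A minimal launch sketch (an assumption, not necessarily how the Space runs
# the app): requires OPENAI_API_KEY and VECTOR_STORE_ID in the environment,
# plus config/gradio_config.json and config/meta_prompt.txt on disk.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    chatbot = ChatbotInterface()
    chatbot.create_interface().launch()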