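"""Wraps the OpenAI Responses API with file search over a vector store to answer user queries."""
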
import logging
import os
from typing import Optional

import openai


class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses,
    maintain conversation history, and handle user queries.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager with optional parameters for configuration.

        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")
        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        self.previous_response_id = None

        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        # Set default parameters for response generation
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

    def generate_response(self, query: str, history: list) -> list:
        """
        Generate a response to a user query using the OpenAI API.

        This method interacts with the OpenAI API to create a response based on the user's query.
        It supports optional parameters for model configuration and handles errors gracefully.

        Args:
            query (str): The user query to respond to.
            history (list): The conversation history from the chatbot.

        Returns:
            list: A list of dictionaries representing the conversation, including the generated response.
        """
        # Validate the query before building the API input, so an invalid first
        # query never leaks the meta prompt into the returned history
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            warning_message = "Please enter a valid query."
            return history + [{"role": "user", "content": query},
                              {"role": "assistant", "content": warning_message}]

        # Prepend the meta prompt on the first turn only; later turns are chained
        # server-side via previous_response_id
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})
        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            return history + input_data
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            return history + input_data
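

# A minimal usage sketch, assuming VECTOR_STORE_ID and OPENAI_API_KEY are set in
# the environment and 'config/meta_prompt.txt' exists; the query below is purely
# illustrative and not part of the original class.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    manager = ResponseManager()
    history = []  # e.g. a chatbot history in "messages" (list-of-dicts) format
    history = manager.generate_response("What documents do you have access to?", history)
    for message in history:
        print(f"{message['role']}: {message['content']}")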