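"""Response manager for an OpenAI file-search chatbot.

Defines ResponseManager, which wraps the OpenAI Responses API with a
file_search tool over a vector store, loads a meta prompt from disk, and
chains turns together via previous_response_id.
"""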
import os
import logging
from typing import Optional

import openai

class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses,
    maintain conversation history, and handle user queries.

    Attributes:
        DEFAULT_META_PROMPT_FILE (str): Default path to the meta prompt file.
        DEFAULT_MODEL (str): Default OpenAI model to use.
        DEFAULT_TEMPERATURE (float): Default temperature for response generation.
        DEFAULT_MAX_OUTPUT_TOKENS (int): Default maximum number of output tokens.
        DEFAULT_MAX_NUM_RESULTS (int): Default maximum number of file search results.

    Methods:
        __init__(vector_store_id: Optional[str], api_key: Optional[str], meta_prompt_file: Optional[str]):
            Initializes the ResponseManager with a vector store ID, API key, and meta prompt file.
        _load_meta_prompt(meta_prompt_file: str) -> str:
            Loads the meta prompt from the specified file.
        create_response(query: str, model: Optional[str], temperature: Optional[float],
                        max_output_tokens: Optional[int], max_num_results: Optional[int]) -> list:
            Creates a response to a user query using the OpenAI API.
        conversation(query: str, history: list, model: Optional[str], temperature: Optional[float],
                     max_output_tokens: Optional[int], max_num_results: Optional[int]) -> list:
            Handles chatbot interaction and maintains conversation history.
    """

    DEFAULT_META_PROMPT_FILE = 'config/meta_prompt.txt'
    DEFAULT_MODEL = "gpt-4o-mini"
    DEFAULT_TEMPERATURE = 0
    DEFAULT_MAX_OUTPUT_TOKENS = 800
    DEFAULT_MAX_NUM_RESULTS = 15

    def __init__(self, vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None):
        """
        Initialize the ResponseManager with a vector store ID, API key, and meta prompt file.

        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or self.DEFAULT_META_PROMPT_FILE
        self.previous_response_id = None

        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt
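
    # Illustrative only (not the repository's actual prompt): a file at
    # config/meta_prompt.txt might contain instructions along the lines of:
    #
    #   You are a helpful assistant. Answer questions using only the
    #   documents retrieved via file search, and say when the answer
    #   cannot be found in them.
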
    def create_response(self, query: str,
                        model: Optional[str] = None,
                        temperature: Optional[float] = None,
                        max_output_tokens: Optional[int] = None,
                        max_num_results: Optional[int] = None) -> list:
        """
        Create a response to a user query using the OpenAI API.

        :param query: The user query to respond to.
        :param model: The OpenAI model to use (default is "gpt-4o-mini").
        :param temperature: The temperature for the response (default is 0).
        :param max_output_tokens: The maximum number of output tokens (default is 800).
        :param max_num_results: The maximum number of file search results (default is 15).
        :return: The messages for this turn, ending with the assistant reply.
        """
        # Validate the query before building any input
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            return [{"role": "user", "content": query},
                    {"role": "assistant", "content": "Please enter a valid query."}]

        # Send the meta prompt as a developer message on the first turn only;
        # later turns are chained to it via previous_response_id
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})

        # Set default values for optional parameters
        model = model or self.DEFAULT_MODEL
        temperature = temperature if temperature is not None else self.DEFAULT_TEMPERATURE
        max_output_tokens = max_output_tokens if max_output_tokens is not None else self.DEFAULT_MAX_OUTPUT_TOKENS
        max_num_results = max_num_results if max_num_results is not None else self.DEFAULT_MAX_NUM_RESULTS

        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": max_num_results
                }],
                temperature=temperature,
                max_output_tokens=max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            return input_data
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            return input_data
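
    # Illustrative shape of the list create_response returns (not real output):
    #   [{"role": "user", "content": "What does the handbook say about X?"},
    #    {"role": "assistant", "content": "According to the handbook, ..."}]
    # On the first turn the list also starts with the developer meta prompt
    # message, since that message is part of input_data on that call.
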
    def conversation(self, query: str, history: list,
                     model: Optional[str] = None,
                     temperature: Optional[float] = None,
                     max_output_tokens: Optional[int] = None,
                     max_num_results: Optional[int] = None
                     ) -> list:
        """
        Handle the chatbot interaction and maintain conversation history.

        :param query: The user query to respond to.
        :param history: The conversation history (list of message dicts with "role" and "content").
        :param model: The OpenAI model to use (default is "gpt-4o-mini").
        :param temperature: The temperature for the response (default is 0).
        :param max_output_tokens: The maximum number of output tokens (default is 800).
        :param max_num_results: The maximum number of file search results (default is 15).
        :return: Updated conversation history.
        """
        logging.info("Received query: %s", query)
        try:
            # Set default values for optional parameters
            model = model or self.DEFAULT_MODEL
            temperature = temperature if temperature is not None else self.DEFAULT_TEMPERATURE
            max_output_tokens = max_output_tokens if max_output_tokens is not None else self.DEFAULT_MAX_OUTPUT_TOKENS
            max_num_results = max_num_results if max_num_results is not None else self.DEFAULT_MAX_NUM_RESULTS

            # Generate the messages for this turn using create_response
            logging.info("Generating response for the query...")
            response = self.create_response(
                query=query,
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_num_results=max_num_results
            )
            logging.info("Response generated successfully.")
            # create_response returns a list of message dicts, so extend
            # (not append) to keep the history flat
            history.extend(response)
            return history
        except Exception as e:
            # Log the error and return the history unchanged
            logging.error("An error occurred while generating a response: %s", str(e))
            return history
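
# Minimal usage sketch. Assumptions not in the original module: VECTOR_STORE_ID
# and OPENAI_API_KEY are set in the environment, config/meta_prompt.txt exists,
# and the example queries below are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    manager = ResponseManager()
    history: list = []

    # Each turn extends the history with the new user/assistant messages
    history = manager.conversation("What topics do the indexed documents cover?", history)
    history = manager.conversation("Summarize the first topic in two sentences.", history)

    for message in history:
        print(f"{message['role']}: {message['content']}")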