"""
This module manages responses from the OpenAI Responses API for an IT Helpdesk assistant
at Harvey Mudd College. It initializes the OpenAI client and provides methods to generate
responses using Retrieval-Augmented Generation (RAG). The module leverages a vector store
to retrieve relevant knowledge base documents and uses the specified OpenAI model to
generate responses. Additionally, it loads a meta prompt from a configuration file to
enhance the AI model's contextual understanding.
"""
import logging
import os
from typing import Optional

import openai
# Configure logging to both file and console
log_file_path = "logs/response_manager.log"
os.makedirs(os.path.dirname(log_file_path), exist_ok=True)  # FileHandler fails if the directory is missing
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler(log_file_path, mode='a', encoding='utf-8'),  # Save logs to a file
        logging.StreamHandler()  # Print logs to the console
    ]
)
# Load the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    logging.error("OPENAI_API_KEY environment variable is not set.")
    raise ValueError("OPENAI_API_KEY environment variable is not set.")
class ResponseManager:
    """
    A class to manage responses from the OpenAI API for an IT Helpdesk assistant.

    This class initializes the OpenAI client and provides a method to create responses
    to user queries using the specified OpenAI model.
    """

    DEFAULT_META_PROMPT_FILE = 'config/meta_prompt.txt'
    DEFAULT_MODEL = "gpt-4o-mini"
    DEFAULT_TEMPERATURE = 0
    DEFAULT_MAX_OUTPUT_TOKENS = 800
    DEFAULT_MAX_NUM_RESULTS = 15
    def __init__(self, vector_store_id: str, meta_prompt_file: Optional[str] = None):
        """
        Initialize the ResponseManager with a vector store ID and meta prompt file.

        :param vector_store_id: The ID of the vector store to use for file search.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        """
        if not vector_store_id:
            logging.error("Vector store ID is not provided.")
            raise ValueError("Vector store ID cannot be empty.")
        self.vector_store_id = vector_store_id
        self.meta_prompt_file = meta_prompt_file or self.DEFAULT_META_PROMPT_FILE
        self.client = openai.OpenAI(api_key=api_key)
        self.previous_response_id = None  # Tracks the last response so follow-up queries keep context

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)
    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt
    def create_response(self, query: str, model: Optional[str] = None,
                        temperature: Optional[float] = None, max_output_tokens: Optional[int] = None,
                        max_num_results: Optional[int] = None) -> str:
        """
        Create a response to a user query using the OpenAI API.

        :param query: The user query to respond to.
        :param model: The OpenAI model to use (default is "gpt-4o-mini").
        :param temperature: The temperature for the response (default is 0).
        :param max_output_tokens: The maximum number of output tokens (default is 800).
        :param max_num_results: The maximum number of file search results to retrieve (default is 15).
        :return: The response text from the OpenAI API.
        """
        if not query.strip():
            logging.error("Query is empty or invalid.")
            raise ValueError("Query cannot be empty.")

        model = model or self.DEFAULT_MODEL
        temperature = temperature if temperature is not None else self.DEFAULT_TEMPERATURE
        max_output_tokens = max_output_tokens if max_output_tokens is not None else self.DEFAULT_MAX_OUTPUT_TOKENS
        max_num_results = max_num_results if max_num_results is not None else self.DEFAULT_MAX_NUM_RESULTS

        # Prepare the input for the API call. The meta prompt is sent only on the
        # first turn; later turns inherit context through previous_response_id,
        # so only the new user query needs to be included.
        if self.previous_response_id is None:
            input_data = [{"role": "developer", "content": self.meta_prompt}]
        else:
            input_data = []
        input_data.append({"role": "user", "content": query})
        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": max_num_results
                }],
                temperature=temperature,
                max_output_tokens=max_output_tokens
            )
            self.previous_response_id = response.id  # Chain the next query onto this response
            logging.info("Response received successfully.")
            return response.output_text
        except openai.OpenAIError as e:
            # The v1.x SDK exposes OpenAIError at the top level; openai.error no longer exists
            logging.error(f"OpenAI API error: {e}")
            raise RuntimeError(f"Failed to generate response: {e}") from e
        except Exception as e:
            logging.error(f"Unexpected error: {e}")
            raise RuntimeError(f"An unexpected error occurred: {e}") from e