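"""
Gradio chatbot app that answers user queries with the OpenAI Responses API,
using file search over a configured vector store. ResponseManager wraps the
API calls and conversation state; ChatbotInterface builds the Gradio UI.
"""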
import os
import json
import logging
from typing import Optional

import openai
import gradio as gr

class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses,
    maintain conversation history, and handle user queries.
    """

    def __init__(self,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager with optional parameters for configuration.

        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")
        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")
        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        # ID of the last API response; used to continue multi-turn conversations
        self.previous_response_id = None
        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)
        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)
        # Set default parameters for response generation
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results
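
    # Usage note (illustrative, hypothetical values): the manager can be
    # constructed with explicit credentials, e.g.
    #   ResponseManager(vector_store_id="vs_abc123", api_key="sk-...")
    # or with no arguments, in which case VECTOR_STORE_ID and OPENAI_API_KEY
    # are read from the environment as shown above.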

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.

        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt
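
    # Illustrative example of what 'config/meta_prompt.txt' might contain
    # (hypothetical content; any plain-text system instructions work):
    #   You are a helpful assistant. Answer questions using only the documents
    #   retrieved via file search, and say so when the documents do not cover
    #   the question.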

    def generate_response(self, query: str, history: list) -> tuple:
        """
        Generate a response to a user query using the OpenAI API.

        This method sends the user's query to the OpenAI Responses API using the
        manager's configured model parameters and handles errors gracefully.

        Args:
            query (str): The user query to respond to.
            history (list): The conversation history from the chatbot.

        Returns:
            tuple: (updated conversation list for display, updated conversation list for state)
        """
        # Validate the query before building any API input
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            new_history = history + [
                {"role": "assistant", "content": "Please enter a valid query."}
            ]
            return new_history, new_history
        # Prepend the meta prompt as a developer message on the first turn only;
        # subsequent turns are linked server-side via previous_response_id.
        input_data = [] if self.previous_response_id else [{"role": "developer", "content": self.meta_prompt}]
        input_data.append({"role": "user", "content": query})
        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                truncation="auto",
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            # Only user and assistant turns go into the visible history;
            # the developer meta prompt is never displayed.
            new_history = history + [
                {"role": "user", "content": query},
                {"role": "assistant", "content": response.output_text}
            ]
            return new_history, new_history
        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            new_history = history + [
                {"role": "user", "content": query},
                {"role": "assistant", "content": error_message}
            ]
            return new_history, new_history
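
    # Example of the history format produced above and consumed by
    # gr.Chatbot(type="messages"): a list of role/content dicts, e.g.
    # (hypothetical content):
    #   [{"role": "user", "content": "What do the docs say about setup?"},
    #    {"role": "assistant", "content": "The docs describe ..."}]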


class ChatbotInterface:
    def __init__(self,
                 config_path: str = 'config/gradio_config.json',
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None):
        """
        Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.

        :param config_path: Path to the configuration JSON file.
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file.
        """
        self.config = self.load_config(config_path)
        self.title = self.config["chatbot_title"]
        self.description = self.config["chatbot_description"]
        self.input_label = self.config["chatbot_input_label"]
        self.input_placeholder = self.config["chatbot_input_placeholder"]
        self.output_label = self.config["chatbot_output_label"]
        self.reset_button = self.config["chatbot_reset_button"]
        self.submit_button = self.config["chatbot_submit_button"]
        # Initialize ResponseManager with custom parameters
        try:
            self.response_manager = ResponseManager(
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_num_results=max_num_results,
                vector_store_id=vector_store_id,
                api_key=api_key,
                meta_prompt_file=meta_prompt_file
            )
            self.generate_response = self.response_manager.generate_response
            logging.info(
                "ChatbotInterface initialized with the following parameters:\n"
                f" - Model: {model}\n"
                f" - Temperature: {temperature}\n"
                f" - Max Output Tokens: {max_output_tokens}\n"
                f" - Max Number of Results: {max_num_results}\n"
                f" - Vector Store ID: {vector_store_id}\n"
                f" - API Key: {'Provided' if api_key else 'Not Provided'}\n"
                f" - Meta Prompt File: {meta_prompt_file or 'Default'}"
            )
        except Exception as e:
            logging.error(f"Failed to initialize ResponseManager: {e}")
            raise

    def load_config(self, config_path: str) -> dict:
        """
        Load the configuration for the Gradio GUI interface from a JSON file.

        :param config_path: Path to the configuration JSON file.
        :return: Configuration dictionary.
        """
        logging.info(f"Loading configuration from {config_path}...")
        if not os.path.exists(config_path):
            logging.error(f"Configuration file not found: {config_path}")
            raise FileNotFoundError(f"Configuration file not found: {config_path}")
        with open(config_path, 'r', encoding='utf-8') as config_file:
            config = json.load(config_file)
        required_keys = [
            "chatbot_title", "chatbot_description", "chatbot_input_label",
            "chatbot_input_placeholder", "chatbot_output_label",
            "chatbot_reset_button", "chatbot_submit_button"
        ]
        for key in required_keys:
            if key not in config:
                logging.error(f"Missing required configuration key: {key}")
                raise ValueError(f"Missing required configuration key: {key}")
        logging.info("Configuration loaded successfully.")
        return config
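
    # Illustrative example of a 'config/gradio_config.json' file containing all
    # of the required keys checked above (the values are hypothetical):
    #   {
    #       "chatbot_title": "Document Q&A Bot",
    #       "chatbot_description": "Ask questions about the indexed documents.",
    #       "chatbot_input_label": "Your question",
    #       "chatbot_input_placeholder": "Type a question...",
    #       "chatbot_output_label": "Conversation",
    #       "chatbot_reset_button": "Reset",
    #       "chatbot_submit_button": "Submit"
    #   }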

    def reset_output(self) -> tuple:
        """
        Reset the chatbot output and the session conversation state.

        Also clears the stored previous_response_id so the next query starts a
        fresh conversation with the meta prompt.

        :return: Empty lists for the chatbot display and the session state.
        """
        self.response_manager.previous_response_id = None
        return [], []

    def create_interface(self) -> gr.Blocks:
        """
        Create the Gradio Blocks interface.

        :return: A Gradio Blocks interface object.
        """
        logging.info("Creating Gradio interface...")
        # Define the Gradio Blocks interface
        with gr.Blocks() as demo:
            gr.Markdown(f"## {self.title}\n{self.description}")
            # Chatbot history component
            chatbot_output = gr.Chatbot(label=self.output_label, type="messages")
            # Session-specific state to store the conversation history
            conversation_state = gr.State([])
            # User input
            user_input = gr.Textbox(
                lines=2,
                label=self.input_label,
                placeholder=self.input_placeholder
            )
            # Buttons
            with gr.Row():
                reset = gr.Button(self.reset_button, variant="secondary")
                submit = gr.Button(self.submit_button, variant="primary")
            submit.click(
                fn=self.generate_response,
                inputs=[user_input, conversation_state],
                outputs=[chatbot_output, conversation_state]
            )
            user_input.submit(
                fn=self.generate_response,
                inputs=[user_input, conversation_state],
                outputs=[chatbot_output, conversation_state]
            )
            # Reset clears both the visible chat and the stored conversation state
            reset.click(
                fn=self.reset_output,
                inputs=None,
                outputs=[chatbot_output, conversation_state]
            )
        logging.info("Gradio interface created successfully.")
        return demo
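

# A minimal entry-point sketch (not part of the original file): it assumes the
# default 'config/gradio_config.json' and 'config/meta_prompt.txt' files exist
# and that VECTOR_STORE_ID and OPENAI_API_KEY are set in the environment, as
# required by ResponseManager.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    chatbot = ChatbotInterface()
    demo = chatbot.create_interface()
    demo.launch()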