import os
import openai
import logging
from typing import Optional

class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses, 
    maintain conversation history, and handle user queries.
    """

    def __init__(self, 
                 vector_store_id: Optional[str] = None, 
                 api_key: Optional[str] = None, 
                 meta_prompt_file: Optional[str] = None,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 800,
                 max_num_results: int = 15):
        """
        Initialize the ResponseManager with optional parameters for configuration.
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 800).
        :param max_num_results: The maximum number of search results to return (default: 15).
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or 'config/meta_prompt.txt'
        self.previous_response_id = None

        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

        # Set default parameters for response generation
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.
        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

    def generate_response(self, query: str, history: list) -> list:
        """
        Generate a response to a user query using the OpenAI Responses API.
        The model, temperature, output-token limit, and file-search settings
        configured in __init__ apply to every call; API errors are caught and
        surfaced as an assistant message instead of being raised.
        Args:
            query (str): The user query to respond to.
            history (list): The conversation history from the chatbot.
        Returns:
            list: The updated conversation, including the generated response.
        """
        # Validate the query first so an empty query never reaches the API and
        # never adds an empty user turn (or the meta prompt) to the history.
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            return history + [{"role": "assistant", "content": "Please enter a valid query."}]

        # Prepare the input for the API call. The developer meta prompt is sent
        # only on the first turn; afterwards previous_response_id carries the context.
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})

        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.model,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.max_num_results
                }],
                temperature=self.temperature,
                max_output_tokens=self.max_output_tokens
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            # Keep the developer meta prompt out of the visible chat history.
            return history + [m for m in input_data if m["role"] != "developer"]

        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            return history + [m for m in input_data if m["role"] != "developer"]
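

# --- Usage sketch (illustrative, not part of the class) ---
# A minimal example of driving ResponseManager from a script. It assumes the
# OPENAI_API_KEY and VECTOR_STORE_ID environment variables are set and that
# 'config/meta_prompt.txt' exists; the queries below are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    manager = ResponseManager()  # reads VECTOR_STORE_ID / OPENAI_API_KEY from the environment
    history: list = []

    # Each call appends the new user/assistant turns to the running history,
    # while previous_response_id threads the conversation server-side.
    history = manager.generate_response("What topics do the indexed documents cover?", history)
    print(history[-1]["content"])

    history = manager.generate_response("Summarize the first topic in two sentences.", history)
    print(history[-1]["content"])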