import os
import openai
import logging
from typing import Optional


class ResponseManager:
    """
    This class initializes the OpenAI client and provides methods to create responses, 
    maintain conversation history, and handle user queries.
    Attributes:
        DEFAULT_META_PROMPT_FILE (str): Default path to the meta prompt file.
        DEFAULT_MODEL (str): Default OpenAI model to use.
        DEFAULT_TEMPERATURE (float): Default temperature for response generation.
        DEFAULT_MAX_OUTPUT_TOKENS (int): Default maximum number of output tokens.
        DEFAULT_MAX_NUM_RESULTS (int): Default maximum number of search results.
    Methods:
        __init__(vector_store_id: Optional[str], api_key: Optional[str], meta_prompt_file: Optional[str]):
            Initializes the ResponseManager with a vector store ID, API key, and meta prompt file.
        _load_meta_prompt(meta_prompt_file: str) -> str:
            Loads the meta prompt from the specified file.
        create_response(query: str, model: Optional[str], temperature: Optional[float], 
                        max_output_tokens: Optional[int], max_num_results: Optional[int]) -> str:
            Creates a response to a user query using the OpenAI API.
        conversation(query: str, model: Optional[str], temperature: Optional[float], 
                     max_output_tokens: Optional[int], max_num_results: Optional[int], 
            Handles chatbot interaction and maintains conversation history.
    """

    DEFAULT_META_PROMPT_FILE = 'config/meta_prompt.txt'
    DEFAULT_MODEL = "gpt-4o-mini"
    DEFAULT_TEMPERATURE = 0.0
    DEFAULT_MAX_OUTPUT_TOKENS = 800
    DEFAULT_MAX_NUM_RESULTS = 15

    def __init__(self, vector_store_id: Optional[str] = None, api_key: Optional[str] = None, meta_prompt_file: Optional[str] = None):
        """
        Initialize the ResponseManager with a vector store ID, API key, and meta prompt file.
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file (default: 'config/meta_prompt.txt').
        """
        # Load vector_store_id and api_key from environment variables if not provided
        self.vector_store_id = vector_store_id or os.getenv('VECTOR_STORE_ID')
        if not self.vector_store_id:
            logging.error("VECTOR_STORE_ID is not provided or set in the environment.")
            raise ValueError("VECTOR_STORE_ID is required.")

        self.api_key = api_key or os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            logging.error("OPENAI_API_KEY is not provided or set in the environment.")
            raise ValueError("OPENAI_API_KEY is required.")

        # Initialize other attributes
        self.meta_prompt_file = meta_prompt_file or self.DEFAULT_META_PROMPT_FILE
        self.previous_response_id = None

        # Initialize the OpenAI client
        self.client = openai.OpenAI(api_key=self.api_key)

        # Load the meta prompt from the specified file
        self.meta_prompt = self._load_meta_prompt(self.meta_prompt_file)

    def _load_meta_prompt(self, meta_prompt_file: str) -> str:
        """
        Load the meta prompt from the specified file.
        :param meta_prompt_file: Path to the meta prompt file.
        :return: The meta prompt as a string.
        """
        if not os.path.exists(meta_prompt_file):
            logging.error(f"Meta prompt file '{meta_prompt_file}' not found.")
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r', encoding='utf-8') as file:
            meta_prompt = file.read().strip()
        logging.info(f"Meta prompt loaded successfully from '{meta_prompt_file}'.")
        return meta_prompt

    def generate_response(self, query: str, history: list) -> list:
        """
        Generate a response to a user query using the OpenAI API.
        This method sends the query to the OpenAI Responses API, chaining turns
        via previous_response_id, applies the class-level default model settings,
        and handles errors gracefully.
        Args:
            query (str): The user query to respond to.
            history (list): The conversation history from the chatbot.
        Returns:
            list: A list of dictionaries representing the conversation, including the generated response.
        """
        # Validate the query before building the API input
        if not query.strip():
            logging.warning("Empty or invalid query received.")
            return history + [
                {"role": "user", "content": query},
                {"role": "assistant", "content": "Please enter a valid query."},
            ]

        # Prepare the input for the API call. The developer meta prompt is only
        # sent on the first turn; later turns are chained server-side via
        # previous_response_id.
        input_data = [{"role": "developer", "content": self.meta_prompt}] if self.previous_response_id is None else []
        input_data.append({"role": "user", "content": query})

        try:
            logging.info("Sending request to OpenAI API...")
            response = self.client.responses.create(
                model=self.DEFAULT_MODEL,
                previous_response_id=self.previous_response_id,
                input=input_data,
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": [self.vector_store_id],
                    "max_num_results": self.DEFAULT_MAX_NUM_RESULTS
                }],
                temperature=self.DEFAULT_TEMPERATURE,
                max_output_tokens=self.DEFAULT_MAX_OUTPUT_TOKENS
            )
            self.previous_response_id = response.id
            logging.info("Response received successfully.")
            input_data.append({"role": "assistant", "content": response.output_text})
            return history + input_data

        except Exception as e:
            logging.error(f"An error occurred while generating a response: {e}")
            error_message = "Sorry, I couldn't generate a response at this time. Please try again later."
            input_data.append({"role": "assistant", "content": error_message})
            return history + input_data
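

# A minimal usage sketch, included for illustration rather than as part of the
# module's public API. It assumes VECTOR_STORE_ID and OPENAI_API_KEY are set in
# the environment and that config/meta_prompt.txt exists; the sample query is
# a placeholder.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    manager = ResponseManager()
    history: list = []

    # Each call returns the history extended with the new user/assistant turns;
    # follow-up questions are answered in context via previous_response_id.
    history = manager.generate_response("What topics do the indexed documents cover?", history)
    print(history[-1]["content"])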