File size: 7,472 Bytes
39fdb0c
 
 
 
1abc7f3
39fdb0c
 
 
 
 
 
 
f2ef830
3e81867
39fdb0c
 
1b126bd
 
 
39fdb0c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d594be1
39fdb0c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f064c52
39fdb0c
 
1abc7f3
 
f064c52
39fdb0c
f064c52
39fdb0c
 
 
f064c52
39fdb0c
 
f064c52
39fdb0c
 
 
f064c52
39fdb0c
f064c52
 
39fdb0c
 
f064c52
 
39fdb0c
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
import os
import json
import logging
from typing import Optional
import asyncio
import gradio as gr
from utils.response_manager import ResponseManager 

class ChatbotInterface:
    """Gradio chat UI that pairs each browser session with its own ResponseManager.

    UI texts (title, description, placeholder, output label) are loaded from a
    JSON config file; all model parameters are forwarded verbatim to the
    per-session ResponseManager created on page load.
    """

    def __init__(self,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 600,
                 max_num_results: int = 5,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 config_path: str = 'config/gradio_config.json'
                ):
        """
        Initialize the ChatbotInterface with UI configuration and the
        parameters forwarded to each session's ResponseManager.

        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 600).
        :param max_num_results: The maximum number of search results to return (default: 5).
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file.
        :param config_path: Path to the Gradio UI configuration JSON file.
        :raises FileNotFoundError: If the configuration file does not exist.
        :raises ValueError: If the configuration is missing a required key.
        """
        # Parameters for UI — loaded from JSON so texts can change without code edits.
        self.config = self.load_config(config_path)
        self.title = self.config["chatbot_title"]
        self.description = self.config["chatbot_description"]
        self.input_placeholder = self.config["chatbot_input_placeholder"]
        self.output_label = self.config["chatbot_output_label"]

        # Parameters forwarded verbatim to ResponseManager (one instance per session).
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results
        self.vector_store_id = vector_store_id
        self.api_key = api_key
        self.meta_prompt_file = meta_prompt_file

    @staticmethod
    def load_config(config_path: str) -> dict:
        """
        Load the configuration for the Gradio GUI interface from a JSON file.

        :param config_path: Path to the configuration JSON file.
        :return: Configuration dictionary.
        :raises FileNotFoundError: If the configuration file does not exist.
        :raises ValueError: If a required configuration key is missing.
        """
        logging.info("Loading configuration from %s...", config_path)
        if not os.path.exists(config_path):
            logging.error("Configuration file not found: %s", config_path)
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        # Explicit encoding avoids platform-dependent decoding of the JSON file.
        with open(config_path, 'r', encoding='utf-8') as config_file:
            config = json.load(config_file)

        required_keys = [
            "chatbot_title",
            "chatbot_description",
            "chatbot_input_placeholder",
            "chatbot_output_label"
        ]

        for key in required_keys:
            if key not in config:
                logging.error("Missing required configuration key: %s", key)
                raise ValueError(f"Missing required configuration key: {key}")

        return config

    def create_interface(self) -> gr.Blocks:
        """
        Create the Gradio Blocks interface that displays a single container including both
        the text input and a small arrow submit button. The interface will clear the text input
        after each message is submitted.

        :return: The assembled (not yet launched) Gradio Blocks demo.
        """
        logging.info("Creating Gradio interface...")

        with gr.Blocks() as demo:
            # Title and description area.
            gr.Markdown(f"## {self.title}\n{self.description}")

            # Chatbot output area (OpenAI-style message dicts).
            chatbot_output = gr.Chatbot(label=self.output_label, type="messages")

            # Session-specific states: conversation history plus the session's
            # own ResponseManager instance, so concurrent users don't share state.
            conversation_state = gr.State([])
            response_manager_state = gr.State(None)

            # Input row: reset button + text box.
            with gr.Row(elem_id="input-container", equal_height=True):
                reset = gr.ClearButton(
                    value="Clear history 🔄",
                    variant="secondary",
                    elem_id="reset-button",
                    size="lg"
                )
                user_input = gr.Textbox(
                    lines=1,
                    show_label=False,              # Hide label for a unified look.
                    elem_id="chat-input",
                    placeholder=self.input_placeholder,
                    scale=500,
                )

            # Build a fresh ResponseManager for one session.
            def init_response_manager():
                try:
                    rm = ResponseManager(
                            model=self.model,
                            temperature=self.temperature,
                            max_output_tokens=self.max_output_tokens,
                            max_num_results=self.max_num_results,
                            vector_store_id=self.vector_store_id,
                            api_key=self.api_key,
                            meta_prompt_file=self.meta_prompt_file
                        )

                    logging.info(
                        "ChatbotInterface initialized with the following parameters:\n"
                        f"  - Model: {self.model}\n"
                        f"  - Temperature: {self.temperature}\n"
                        f"  - Max Output Tokens: {self.max_output_tokens}\n"
                        f"  - Max Number of Results: {self.max_num_results}\n"
                    )

                    rm.reset_conversation()
                    return rm
                except Exception as e:
                    logging.error(f"Failed to initialize ResponseManager: {e}")
                    raise

            # Reset handler: clears history and replaces the session's ResponseManager.
            def reset_output():
                response_manager = init_response_manager()
                return [], [], response_manager, "" # Returns [chatbot_output, conversation_state, response_manager_state, user_input]

            # Submit handler: delegates to the session-specific ResponseManager.
            async def process_input(user_message, chat_history, response_manager):
                updated_history = await response_manager.generate_response(user_message, chat_history)
                return updated_history, updated_history, response_manager, "" # Returns [chatbot_output, conversation_state, response_manager_state, user_input]

            # Initialize a ResponseManager for the session on page load.
            demo.load(
                fn=init_response_manager,
                inputs=None,
                outputs=response_manager_state # Each session state gets its own instance of ResponseManager class
            )

            # ClearButton action.
            reset.click(
                fn=reset_output,
                inputs=None,
                outputs=[chatbot_output, conversation_state, response_manager_state, user_input]
            )

            # Enter key triggers response generation.
            user_input.submit(
                fn=process_input,
                inputs=[user_input, conversation_state, response_manager_state],
                outputs=[chatbot_output, conversation_state, response_manager_state, user_input]
            )

        logging.info("Gradio interface created successfully.")
        return demo