import os
import gradio as gr
from huggingface_hub import InferenceClient

class XylariaChat:
    def __init__(self):
        # Securely load HuggingFace token
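        # (set beforehand with e.g. `export HF_TOKEN=hf_...`)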
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")

        # Initialize the inference client
        self.client = InferenceClient(
            model="Qwen/QwQ-32B-Preview",  # Changed model name
            token=self.hf_token
        )

        # Initialize conversation history and persistent memory
        self.conversation_history = []
        self.persistent_memory = {}

        # System prompt with more detailed instructions
        self.system_prompt = """You are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin designed to provide helpful, accurate, and engaging support across a wide range of topics. Key guidelines for our interaction include:
Core Principles:
- Provide accurate and comprehensive assistance
- Maintain a friendly and approachable communication style
- Prioritize the user's needs and context
Communication Style:
- Be conversational and warm
- Use clear, concise language
- Occasionally use light, appropriate emoji to enhance communication
- Adapt communication style to the user's preferences
- Respond in English
Important Notes:
- I am an AI assistant created by an independent developer
- I do not represent OpenAI or any other AI institution
- For image-related queries, I can describe or analyze images, and generate or link to images directly
Capabilities:
- Assist with research, writing, analysis, problem-solving, and creative tasks
- Answer questions across various domains
- Provide explanations and insights
- Offer supportive and constructive guidance """

    def store_information(self, key, value):
        """Store important information in persistent memory"""
        self.persistent_memory[key] = value

    def retrieve_information(self, key):
        """Retrieve information from persistent memory"""
        return self.persistent_memory.get(key)
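
    # Example usage of the memory helpers (hypothetical keys):
    #   chat.store_information("user_name", "Alex")
    #   chat.retrieve_information("user_name")   # -> "Alex"
    #   chat.retrieve_information("missing_key") # -> None (dict.get default)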

    def reset_conversation(self):
        """
        Completely reset the conversation history and persistent memory
        This helps prevent exposing previous users' conversations
        """
        self.conversation_history = []
        self.persistent_memory = {}
        return []

    def get_response(self, user_input):
        # Prepare messages with conversation context and persistent memory
        messages = [
            {"role": "system", "content": self.system_prompt},
            *self.conversation_history,
            {"role": "user", "content": user_input}
        ]

        # Add persistent memory context if available
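        # (inserted at index 1, right after the system prompt and ahead of
        # the conversation turns)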
        if self.persistent_memory:
            memory_context = "Remembered Information:\n" + "\n".join(
                [f"{k}: {v}" for k, v in self.persistent_memory.items()]
            )
            messages.insert(1, {"role": "system", "content": memory_context})

        # Generate response with streaming
        try:
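            # With stream=True (and the default details=False),
            # text_generation yields incremental text chunks as plain strings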
            response_stream = self.client.text_generation(
                prompt=self.messages_to_prompt(messages),  # Convert messages to prompt format
                max_new_tokens=1024,
                temperature=0.5,
                top_p=0.7,
                stream=True
            )

            return response_stream

        except Exception as e:
            return f"Error generating response: {str(e)}"

    def messages_to_prompt(self, messages):
        """
        Converts a list of messages in OpenAI format to a prompt string.
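
        Example (ChatML-style tags, the chat template used by Qwen models):
            [{"role": "user", "content": "Hi"}]
            -> "<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n"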
        """
        prompt = ""
        for message in messages:
            if message["role"] == "system":
                prompt += f"<|im_start|>system\n{message['content']}<|im_end|>\n"
            elif message["role"] == "user":
                prompt += f"<|im_start|>user\n{message['content']}<|im_end|>\n"
            elif message["role"] == "assistant":
                prompt += f"<|im_start|>assistant\n{message['content']}<|im_end|>\n"
        prompt += "<|im_start|>assistant\n"
        return prompt

    def create_interface(self):
        # Local storage JavaScript functions (these are strings, not functions)
        load_from_local_storage_js = """
        async () => {
            const savedHistory = localStorage.getItem('xylaria_chat_history');
            return savedHistory ? JSON.parse(savedHistory) : [];
        }
        """

        save_to_local_storage_js = """
        async (chatHistory) => {
            localStorage.setItem('xylaria_chat_history', JSON.stringify(chatHistory));
        }
        """

        clear_local_storage_js = """
        async () => {
            localStorage.removeItem('xylaria_chat_history');
        }
        """

        def streaming_response(message, chat_history):
            response_stream = self.get_response(message)

            # If it's an error string, surface it and stop. This function is
            # a generator, so we must yield (a bare `return value` would be
            # swallowed by Gradio and the UI would never update).
            if isinstance(response_stream, str):
                yield "", chat_history + [[message, response_stream]]
                return

            # Prepare for streaming response
            full_response = ""
            updated_history = chat_history + [[message, ""]]

            # Streaming output
            for response_text in response_stream:
                full_response += response_text

                # Update the last message in chat history with partial response
                updated_history[-1][1] = full_response
                yield "", updated_history

            # Update conversation history
            self.conversation_history.append(
                {"role": "user", "content": message}
            )
            self.conversation_history.append(
                {"role": "assistant", "content": full_response}
            )

            # Limit conversation history to prevent token overflow
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]

            return "", updated_history

        # Custom CSS for Inter font
        custom_css = """
        @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
        body, .gradio-container {
            font-family: 'Inter', sans-serif !important;
        }
        .chatbot-container .message {
            font-family: 'Inter', sans-serif !important;
        }
        .gradio-container input, 
        .gradio-container textarea, 
        .gradio-container button {
            font-family: 'Inter', sans-serif !important;
        }
        """

        with gr.Blocks(theme='soft', css=custom_css) as demo:
            # Chat interface with improved styling
            with gr.Column():
                chatbot = gr.Chatbot(
                    label="Xylaria 1.4 Senoa",
                    height=500,
                    show_copy_button=True,
                    # type="messages"  # Use the 'messages' format
                )

                # Input row with improved layout
                with gr.Row():
                    txt = gr.Textbox(
                        show_label=False,
                        placeholder="Type your message...",
                        container=False,
                        scale=4
                    )
                    btn = gr.Button("Send", scale=1)

                # Clear history and memory buttons
                clear = gr.Button("Clear Conversation")
                clear_memory = gr.Button("Clear Memory")

            # Restore any saved history from the browser's localStorage on
            # page load. With fn=None, the JS function's return value is
            # written straight into the chatbot output.
            demo.load(
                fn=None,
                inputs=None,
                outputs=[chatbot],
                js=load_from_local_storage_js
            )

            # Submit functionality with local storage save
            btn.click(
                fn=streaming_response,
                inputs=[txt, chatbot],
                outputs=[txt, chatbot]
            ).then(
                fn=None,
                inputs=[chatbot],  # Pass chatbot history to JavaScript
                outputs=None,
                js=save_to_local_storage_js
            )
            txt.submit(
                fn=streaming_response,
                inputs=[txt, chatbot],
                outputs=[txt, chatbot]
            ).then(
                fn=None,
                inputs=[chatbot],  # Pass chatbot history to JavaScript
                outputs=None,
                js=save_to_local_storage_js
            )

            # Clear conversation history (backend and UI) with local storage clear
            def clear_conversation():
                # Also reset the backend context so stale turns don't leak
                # into the next response
                self.conversation_history = []
                return []

            clear.click(
                fn=clear_conversation,
                inputs=None,
                outputs=[chatbot]
            ).then(
                fn=None,
                inputs=None,
                outputs=None,
                js=clear_local_storage_js
            )

            # Clear persistent memory and reset conversation with local storage clear
            clear_memory.click(
                fn=self.reset_conversation,
                inputs=None,
                outputs=[chatbot]
            ).then(
                fn=None,
                inputs=None,
                outputs=None,
                js=clear_local_storage_js
            )

        return demo

# Launch the interface
def main():
    chat = XylariaChat()
    interface = chat.create_interface()
    interface.launch(
        share=True,  # Optional: create a public link
        debug=True   # Show detailed errors
    )

if __name__ == "__main__":
    main()
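
# Assumed invocation (filename may differ; app.py is the HF Spaces convention):
#   HF_TOKEN=hf_... python app.py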