Reality123b committed on
Commit 7184f92 · verified · 1 Parent(s): 22bc0ec

Update app.py

Files changed (1)
  1. app.py +288 -54
app.py CHANGED
@@ -1,62 +1,296 @@
- import gradio as gr
  import os
  from huggingface_hub import InferenceClient

- hf_token = os.getenv("hf_token")
-
- client = InferenceClient(api_key=hf_token)
-
- def get_response(user_input):
-     messages = [
-         { "role": "system", "content": "you are xylaria 1.4 senoa, developed by sk md saad amin" },
-         { "role": "user", "content": user_input }
-     ]
-
-     stream = client.chat.completions.create(
-         model="Qwen/QwQ-32B-Preview",
-         messages=messages,
-         temperature=0.5,
-         max_tokens=10240,
-         top_p=0.7,
-         stream=True
-     )
-
-     response = ""
-     for chunk in stream:
-         response += chunk.choices[0].delta.content
-     return response
-
- def chat_interface():
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column(scale=0.8):
-                 input_textbox = gr.Textbox(
-                     label="Type your message",
-                     placeholder="Ask me anything...",
-                     lines=1,
-                     max_lines=3,
-                     interactive=True,
-                     elem_id="user-input",
-                     show_label=False
-                 )
-             with gr.Column(scale=0.2):
-                 send_button = gr.Button("Send", elem_id="send-btn")
-
-         chat_output = gr.Chatbot(
-             elem_id="chat-box",
-             label="Xylaria 1.4 Senoa Chatbot",
-             show_label=False
          )

-         def submit_input(user_input, chat_history):
-             response = get_response(user_input)
-             chat_history.append((user_input, response))
-             return "", chat_history

-         input_textbox.submit(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])
-         send_button.click(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])

-     return demo

- demo = chat_interface()
- demo.launch()
 
 
  import os
+ import gradio as gr
  from huggingface_hub import InferenceClient
+ import firebase_admin
+ from firebase_admin import credentials, auth, firestore
+ import json

+ class XylariaChat:
+     def __init__(self):
+         # Securely load HuggingFace token
+         self.hf_token = os.getenv("HF_TOKEN")
+         if not self.hf_token:
+             raise ValueError("HuggingFace token not found in environment variables")
+
+         # Initialize Firebase
+         self._initialize_firebase()
+
+         # Initialize the inference client
+         self.client = InferenceClient(
+             model="Qwen/QwQ-32B-Preview",
+             api_key=self.hf_token
          )

+         # Initialize conversation history and persistent memory
+         self.conversation_history = []
+         self.persistent_memory = {}
+
+         # System prompt with more detailed instructions
+         self.system_prompt = """You are Xylaria 1.4 Senoa, an AI assistant created by Sk Md Saad Amin, designed to provide helpful, accurate, and engaging support across a wide range of topics. Key guidelines for our interaction include:
+ Core Principles:
+ - Provide accurate and comprehensive assistance
+ - Maintain a friendly and approachable communication style
+ - Prioritize the user's needs and context
+ Communication Style:
+ - Be conversational and warm
+ - Use clear, concise language
+ - Occasionally use light, appropriate emoji to enhance communication
+ - Adapt communication style to the user's preferences
+ - Respond in English
+ Important Notes:
+ - I am an AI assistant created by an independent developer
+ - I do not represent OpenAI or any other AI institution
+ Capabilities:
+ - Assist with research, writing, analysis, problem-solving, and creative tasks
+ - Answer questions across various domains
+ - Provide explanations and insights
+ - Offer supportive and constructive guidance"""
+
+     def _initialize_firebase(self):
+         """Initialize Firebase with credentials from environment variable"""
+         try:
+             # Retrieve Firebase configuration from environment variable
+             firebase_config_str = os.getenv('FIREBASE_CONFIG')
+             if not firebase_config_str:
+                 raise ValueError("Firebase configuration not found in environment variables")
+
+             # Parse the Firebase configuration
+             firebase_config = json.loads(firebase_config_str)
+
+             # Check if Firebase is already initialized
+             if not firebase_admin._apps:
+                 # Initialize Firebase Admin SDK
+                 cred = credentials.Certificate({
+                     "type": "service_account",
+                     "project_id": firebase_config.get('projectId'),
+                     "private_key_id": os.getenv('FIREBASE_PRIVATE_KEY_ID'),
+                     "private_key": os.getenv('FIREBASE_PRIVATE_KEY').replace('\\n', '\n'),
+                     "client_email": os.getenv('FIREBASE_CLIENT_EMAIL'),
+                     "client_id": firebase_config.get('clientId'),
+                     "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+                     "token_uri": "https://oauth2.googleapis.com/token",
+                     "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+                     "client_x509_cert_url": os.getenv('FIREBASE_CERT_URL')
+                 })
+                 firebase_admin.initialize_app(cred)
+
+             # Initialize Firestore
+             self.firestore_client = firestore.client()
+         except Exception as e:
+             print(f"Firebase initialization error: {e}")
+             raise
+
+     def store_information(self, user_id, key, value):
+         """Store important information in Firestore"""
+         try:
+             user_doc_ref = self.firestore_client.collection('user_memories').document(user_id)
+             user_doc_ref.set({
+                 key: value
+             }, merge=True)
+         except Exception as e:
+             print(f"Error storing information: {e}")
+
+     def retrieve_information(self, user_id, key):
+         """Retrieve information from Firestore"""
+         try:
+             user_doc = self.firestore_client.collection('user_memories').document(user_id).get()
+             return user_doc.to_dict().get(key) if user_doc.exists else None
+         except Exception as e:
+             print(f"Error retrieving information: {e}")
+             return None
+
+     def reset_conversation(self):
+         """
+         Completely reset the conversation history
+         This helps prevent exposing previous users' conversations
+         """
+         self.conversation_history = []
+         self.persistent_memory = {}
+
+     def get_response(self, user_input):
+         # Prepare messages with conversation context and persistent memory
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             *self.conversation_history,
+             {"role": "user", "content": user_input}
+         ]
+
+         # Generate response with streaming
+         try:
+             stream = self.client.chat.completions.create(
+                 messages=messages,
+                 temperature=0.5,
+                 max_tokens=10240,
+                 top_p=0.7,
+                 stream=True
+             )
+
+             return stream
+
+         except Exception as e:
+             return f"Error generating response: {str(e)}"
+
+     def create_interface(self):
+         # Enhanced custom CSS with modern, clean design
+         custom_css = """
+         @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
+
+         body, .gradio-container {
+             font-family: 'Inter', sans-serif !important;
+             background-color: #f4f4f4;
+         }
+
+         .chatbot-container {
+             max-width: 800px;
+             margin: 0 auto;
+             background-color: white;
+             box-shadow: 0 4px 6px rgba(0,0,0,0.1);
+             border-radius: 12px;
+             overflow: hidden;
+         }
+
+         .gradio-container .message {
+             font-family: 'Inter', sans-serif !important;
+             padding: 10px 15px;
+             margin: 8px 0;
+             border-radius: 8px;
+             max-width: 80%;
+         }
+
+         .gradio-container .message.user {
+             background-color: #e6f2ff;
+             align-self: flex-end;
+             margin-left: auto;
+         }
+
+         .gradio-container .message.assistant {
+             background-color: #f0f0f0;
+             align-self: flex-start;
+         }
+
+         .gradio-container input,
+         .gradio-container textarea,
+         .gradio-container button {
+             font-family: 'Inter', sans-serif !important;
+             border-radius: 8px;
+             transition: all 0.3s ease;
+         }
+
+         .gradio-container button {
+             background-color: #4A90E2;
+             color: white;
+             border: none;
+             padding: 10px 15px;
+         }
+
+         .gradio-container button:hover {
+             background-color: #357ABD;
+             transform: translateY(-2px);
+             box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+         }
+         """

+         with gr.Blocks(theme='soft', css=custom_css) as demo:
+             # Main container with improved layout
+             with gr.Column(elem_classes="chatbot-container"):
+                 # Chat interface with improved styling
+                 with gr.Column():
+                     chatbot = gr.Chatbot(
+                         label="Xylaria 1.4 Senoa",
+                         height=500,
+                         show_copy_button=True,
+                         bubble_full_width=False,
+                         layout='bubble'
+                     )
+
+                     # Input row with improved layout
+                     with gr.Row():
+                         txt = gr.Textbox(
+                             show_label=False,
+                             placeholder="Type your message here...",
+                             container=False,
+                             scale=4
+                         )
+                         btn = gr.Button("Send", scale=1)
+
+                     # Control buttons with improved styling
+                     with gr.Row():
+                         clear = gr.Button("Clear Conversation", variant="secondary")
+                         clear_memory = gr.Button("Clear Memory", variant="stop")
+
+             # Event handlers
+             def streaming_response(message, chat_history):
+                 # Get the model response (a stream, or an error string)
+                 response_stream = self.get_response(message)
+
+                 # If it's an error, return immediately
+                 if isinstance(response_stream, str):
+                     return "", chat_history + [[message, response_stream]]
+
+                 # Prepare for streaming response
+                 full_response = ""
+                 updated_history = chat_history + [[message, ""]]
+
+                 # Streaming output
+                 for chunk in response_stream:
+                     if chunk.choices[0].delta.content:
+                         chunk_content = chunk.choices[0].delta.content
+                         full_response += chunk_content
+
+                         # Update the last message in chat history with partial response
+                         updated_history[-1][1] = full_response
+                         yield "", updated_history
+
+                 # Update conversation history
+                 self.conversation_history.append(
+                     {"role": "user", "content": message}
+                 )
+                 self.conversation_history.append(
+                     {"role": "assistant", "content": full_response}
+                 )
+
+                 # Limit conversation history to prevent token overflow
+                 if len(self.conversation_history) > 10:
+                     self.conversation_history = self.conversation_history[-10:]
+
+             # Submit functionality with streaming
+             btn.click(
+                 fn=streaming_response,
+                 inputs=[txt, chatbot],
+                 outputs=[txt, chatbot]
+             )
+             txt.submit(
+                 fn=streaming_response,
+                 inputs=[txt, chatbot],
+                 outputs=[txt, chatbot]
+             )
+
+             # Clear conversation history
+             clear.click(
+                 fn=lambda: None,
+                 inputs=None,
+                 outputs=[chatbot],
+                 queue=False
+             )
+
+             # Clear persistent memory and reset conversation
+             clear_memory.click(
+                 fn=self.reset_conversation,
+                 inputs=None,
+                 outputs=[chatbot],
+                 queue=False
+             )

+         return demo
+
+ # Launch the interface
+ def main():
+     chat = XylariaChat()
+     interface = chat.create_interface()
+     interface.launch(
+         share=True,   # Optional: create a public link
+         debug=True    # Show detailed errors
+     )

+ if __name__ == "__main__":
+     main()
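
The updated app.py reads all of its secrets from the environment: HF_TOKEN in XylariaChat.__init__, and FIREBASE_CONFIG plus FIREBASE_PRIVATE_KEY_ID, FIREBASE_PRIVATE_KEY, FIREBASE_CLIENT_EMAIL and FIREBASE_CERT_URL in _initialize_firebase. The sketch below is a minimal local-run illustration of which variables the new code expects and how the class is constructed and launched; the file name run_local.py and every value in it are placeholders (assumptions, not part of this commit).

# run_local.py -- hypothetical launcher, not part of this commit.
# Fills in the environment variables that the new app.py reads, then builds
# and launches the Gradio interface the same way main() does.
import json
import os

# Placeholder secrets; a real deployment would set these outside the code.
os.environ["HF_TOKEN"] = "hf_xxx"                      # read in XylariaChat.__init__
os.environ["FIREBASE_CONFIG"] = json.dumps({           # parsed in _initialize_firebase
    "projectId": "example-project",                    # becomes "project_id" in the certificate dict
    "clientId": "000000000000000000000",               # becomes "client_id"
})
os.environ["FIREBASE_PRIVATE_KEY_ID"] = "0123456789abcdef"
# Literal "\n" sequences, because app.py un-escapes them with .replace('\\n', '\n')
os.environ["FIREBASE_PRIVATE_KEY"] = "-----BEGIN PRIVATE KEY-----\\nMIIB...\\n-----END PRIVATE KEY-----\\n"
os.environ["FIREBASE_CLIENT_EMAIL"] = "bot@example-project.iam.gserviceaccount.com"
os.environ["FIREBASE_CERT_URL"] = "https://www.googleapis.com/robot/v1/metadata/x509/bot%40example-project.iam.gserviceaccount.com"

from app import XylariaChat   # importing app.py does not launch anything; main() is guarded by __name__

chat = XylariaChat()          # reads HF_TOKEN and the FIREBASE_* variables set above
demo = chat.create_interface()
demo.launch()                 # main() in app.py additionally passes share=True, debug=True

Keeping the service-account material in environment variables rather than a checked-in key file is what allows the app to run without committing credentials; on Hugging Face these would typically be configured as repository secrets.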