Update app.py
app.py CHANGED
@@ -1,9 +1,6 @@
 import os
 import gradio as gr
 from huggingface_hub import InferenceClient
-import firebase_admin
-from firebase_admin import credentials, auth, firestore
-import json
 
 class XylariaChat:
     def __init__(self):
@@ -12,9 +9,6 @@ class XylariaChat:
         if not self.hf_token:
             raise ValueError("HuggingFace token not found in environment variables")
 
-        # Initialize Firebase
-        self._initialize_firebase()
-
         # Initialize the inference client
         self.client = InferenceClient(
             model="Qwen/QwQ-32B-Preview",
@@ -26,7 +20,7 @@ class XylariaChat:
         self.persistent_memory = {}
 
         # System prompt with more detailed instructions
-        self.system_prompt = """You are Xylaria 1.4 Senoa,
+        self.system_prompt = """You are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin designed to provide helpful, accurate, and engaging support across a wide range of topics. Key guidelines for our interaction include:
 Core Principles:
 - Provide accurate and comprehensive assistance
 - Maintain a friendly and approachable communication style
@@ -40,68 +34,24 @@ Communication Style:
 Important Notes:
 - I am an AI assistant created by an independent developer
 - I do not represent OpenAI or any other AI institution
+- For image-related queries, I can describe images or provide analysis, or generate or link to images directly
 Capabilities:
 - Assist with research, writing, analysis, problem-solving, and creative tasks
 - Answer questions across various domains
 - Provide explanations and insights
-- Offer supportive and constructive guidance"""
+- Offer supportive and constructive guidance """
 
-    def _initialize_firebase(self):
-        """Initialize Firebase using configuration from environment variables"""
-        try:
-            # Retrieve Firebase configuration from environment variable
-            firebase_config_str = os.getenv('FIREBASE_CONFIG')
-            if not firebase_config_str:
-                raise ValueError("Firebase configuration not found in environment variables")
-
-            # Parse the Firebase configuration
-            firebase_config = json.loads(firebase_config_str)
-
-            # Check if Firebase is already initialized
-            if not firebase_admin._apps:
-                # Initialize Firebase Admin SDK
-                cred = credentials.Certificate({
-                    "type": "service_account",
-                    "project_id": firebase_config.get('projectId'),
-                    "private_key_id": os.getenv('FIREBASE_PRIVATE_KEY_ID'),
-                    "private_key": os.getenv('FIREBASE_PRIVATE_KEY').replace('\\n', '\n'),
-                    "client_email": os.getenv('FIREBASE_CLIENT_EMAIL'),
-                    "client_id": firebase_config.get('clientId'),
-                    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-                    "token_uri": "https://oauth2.googleapis.com/token",
-                    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-                    "client_x509_cert_url": os.getenv('FIREBASE_CERT_URL')
-                })
-                firebase_admin.initialize_app(cred)
-
-            # Initialize Firestore
-            self.firestore_client = firestore.client()
-        except Exception as e:
-            print(f"Firebase initialization error: {e}")
-            raise
+    def store_information(self, key, value):
+        """Store important information in persistent memory"""
+        self.persistent_memory[key] = value
 
-    def store_information(self, user_id, key, value):
-        """Store information in Firestore"""
-        try:
-            user_doc_ref = self.firestore_client.collection('user_memories').document(user_id)
-            user_doc_ref.set({
-                key: value
-            }, merge=True)
-        except Exception as e:
-            print(f"Error storing information: {e}")
-
-    def retrieve_information(self, user_id, key):
-        """Retrieve information from Firestore"""
-        try:
-            user_doc = self.firestore_client.collection('user_memories').document(user_id).get()
-            return user_doc.to_dict().get(key) if user_doc.exists else None
-        except Exception as e:
-            print(f"Error retrieving information: {e}")
-            return None
+    def retrieve_information(self, key):
+        """Retrieve information from persistent memory"""
+        return self.persistent_memory.get(key)
 
     def reset_conversation(self):
         """
-        Completely reset the conversation history
+        Completely reset the conversation history and persistent memory
         This helps prevent exposing previous users' conversations
         """
         self.conversation_history = []
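The hunk above swaps the Firestore-backed helpers for a plain dictionary. A minimal usage sketch (hypothetical values; assumes the XylariaChat class from this file and the HuggingFace token set in the environment). Unlike the removed Firestore version there is no user_id scoping and no durability: every user of one process shares the same dict, and it empties whenever the Space restarts.

    chat = XylariaChat()                                # needs the HF token env var
    chat.store_information("favorite_color", "blue")   # writes to the in-process dict
    print(chat.retrieve_information("favorite_color")) # -> "blue"
    print(chat.retrieve_information("missing_key"))    # -> None (dict.get default)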
@@ -115,6 +65,13 @@ Capabilities:
             {"role": "user", "content": user_input}
         ]
 
+        # Add persistent memory context if available
+        if self.persistent_memory:
+            memory_context = "Remembered Information:\n" + "\n".join(
+                [f"{k}: {v}" for k, v in self.persistent_memory.items()]
+            )
+            messages.insert(1, {"role": "system", "content": memory_context})
+
         # Generate response with streaming
         try:
             stream = self.client.chat.completions.create(
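The inserted block injects remembered facts as a second system message, placed at index 1 so it lands after the main system prompt but before the conversation turns. A standalone sketch of that logic with hypothetical memory contents:

    persistent_memory = {"name": "Ada", "project": "demo"}  # hypothetical values
    messages = [
        {"role": "system", "content": "You are Xylaria 1.4 Senoa, ..."},
        {"role": "user", "content": "What am I working on?"},
    ]
    if persistent_memory:
        memory_context = "Remembered Information:\n" + "\n".join(
            [f"{k}: {v}" for k, v in persistent_memory.items()]
        )
        # index 1: after the main system prompt, before the user turns
        messages.insert(1, {"role": "system", "content": memory_context})
    print(messages[1]["content"])  # Remembered Information: \n name: Ada \n project: demo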
@@ -131,154 +88,136 @@ Capabilities:
             return f"Error generating response: {str(e)}"
 
     def create_interface(self):
-
+        def streaming_response(message, chat_history):
+            # Clear input textbox
+            response_stream = self.get_response(message)
+
+            # If it's an error, return immediately
+            if isinstance(response_stream, str):
+                return "", chat_history + [[message, response_stream]]
+
+            # Prepare for streaming response
+            full_response = ""
+            updated_history = chat_history + [[message, ""]]
+
+            # Streaming output
+            for chunk in response_stream:
+                if chunk.choices[0].delta.content:
+                    chunk_content = chunk.choices[0].delta.content
+                    full_response += chunk_content
+
+                    # Update the last message in chat history with partial response
+                    updated_history[-1][1] = full_response
+                    yield "", updated_history
+
+            # Update conversation history
+            self.conversation_history.append(
+                {"role": "user", "content": message}
+            )
+            self.conversation_history.append(
+                {"role": "assistant", "content": full_response}
+            )
+
+            # Limit conversation history to prevent token overflow
+            if len(self.conversation_history) > 10:
+                self.conversation_history = self.conversation_history[-10:]
+
+            return "", updated_history
+
+        # Custom CSS for Inter font and local storage script
         custom_css = """
         @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
 
         body, .gradio-container {
             font-family: 'Inter', sans-serif !important;
-            background-color: #f4f4f4;
-        }
-
-        .chatbot-container {
-            max-width: 800px;
-            margin: 0 auto;
-            background-color: white;
-            box-shadow: 0 4px 6px rgba(0,0,0,0.1);
-            border-radius: 12px;
-            overflow: hidden;
         }
 
-        .
+        .chatbot-container .message {
             font-family: 'Inter', sans-serif !important;
-            padding: 10px 15px;
-            margin: 8px 0;
-            border-radius: 8px;
-            max-width: 80%;
-        }
-
-        .gradio-container .message.user {
-            background-color: #e6f2ff;
-            align-self: flex-end;
-            margin-left: auto;
-        }
-
-        .gradio-container .message.assistant {
-            background-color: #f0f0f0;
-            align-self: flex-start;
         }
 
         .gradio-container input,
         .gradio-container textarea,
         .gradio-container button {
             font-family: 'Inter', sans-serif !important;
-            border-radius: 8px;
-            transition: all 0.3s ease;
         }
-
-
-
-
-
-            padding: 10px 15px;
+        """
+
+        local_storage_js = """
+        function saveToLocalStorage(chatHistory) {
+            localStorage.setItem('xylaria_chat_history', JSON.stringify(chatHistory));
         }
-
-
-
-
-
+
+        function loadFromLocalStorage() {
+            const savedHistory = localStorage.getItem('xylaria_chat_history');
+            return savedHistory ? JSON.parse(savedHistory) : [];
+        }
+
+        function clearLocalStorage() {
+            localStorage.removeItem('xylaria_chat_history');
         }
         """
 
-        with gr.Blocks(theme='soft', css=custom_css) as demo:
-            #
-            with gr.
-
-
-
-
-
-                show_copy_button=True,
-                bubble_full_width=False,
-                layout='bubble'
-            )
-
-            # Input row with improved layout
-            with gr.Row():
-                txt = gr.Textbox(
-                    show_label=False,
-                    placeholder="Type your message here...",
-                    container=False,
-                    scale=4
-                )
-                btn = gr.Button("Send", scale=1)
-
-            # Control buttons with improved styling
-            with gr.Row():
-                clear = gr.Button("Clear Conversation", variant="secondary")
-                clear_memory = gr.Button("Clear Memory", variant="stop")
-
-            # Event handlers
-            def streaming_response(message, chat_history):
-                # Clear input textbox
-                response_stream = self.get_response(message)
-
-                # If it's an error, return immediately
-                if isinstance(response_stream, str):
-                    return "", chat_history + [[message, response_stream]]
-
-                # Prepare for streaming response
-                full_response = ""
-                updated_history = chat_history + [[message, ""]]
-
-                # Streaming output
-                for chunk in response_stream:
-                    if chunk.choices[0].delta.content:
-                        chunk_content = chunk.choices[0].delta.content
-                        full_response += chunk_content
-
-                        # Update the last message in chat history with partial response
-                        updated_history[-1][1] = full_response
-                        yield "", updated_history
-
-                # Update conversation history
-                self.conversation_history.append(
-                    {"role": "user", "content": message}
-                )
-                self.conversation_history.append(
-                    {"role": "assistant", "content": full_response}
-                )
-
-            #
-
-
+        with gr.Blocks(theme='soft', css=custom_css, js=local_storage_js) as demo:
+            # Chat interface with improved styling
+            with gr.Column():
+                chatbot = gr.Chatbot(
+                    label="Xylaria 1.4 Senoa",
+                    height=500,
+                    show_copy_button=True,
+                    value=gr.CSVDataset(local_storage_fn=lambda: "loadFromLocalStorage()")
+                )
+
+            # Input row with improved layout
+            with gr.Row():
+                txt = gr.Textbox(
+                    show_label=False,
+                    placeholder="Type your message...",
+                    container=False,
+                    scale=4
+                )
+                btn = gr.Button("Send", scale=1)
+
+            # Clear history and memory buttons
+            clear = gr.Button("Clear Conversation")
+            clear_memory = gr.Button("Clear Memory")
 
-            # Submit functionality with streaming
+            # Submit functionality with streaming and local storage save
             btn.click(
                 fn=streaming_response,
                 inputs=[txt, chatbot],
                 outputs=[txt, chatbot]
+            ).then(
+                fn=None,  # JavaScript callback
+                _js='(chatHistory) => saveToLocalStorage(chatHistory)'
             )
             txt.submit(
                 fn=streaming_response,
                 inputs=[txt, chatbot],
                 outputs=[txt, chatbot]
+            ).then(
+                fn=None,  # JavaScript callback
+                _js='(chatHistory) => saveToLocalStorage(chatHistory)'
             )
 
             # Clear conversation history
             clear.click(
-                fn=lambda:
+                fn=lambda: [],
                 inputs=None,
-                outputs=[chatbot]
-
+                outputs=[chatbot]
+            ).then(
+                fn=None,  # JavaScript callback
+                _js='() => clearLocalStorage()'
             )
 
             # Clear persistent memory and reset conversation
             clear_memory.click(
-                fn=
-                inputs=None,
-                outputs=[chatbot]
-
+                fn=lambda: [],
+                inputs=None,
+                outputs=[chatbot]
+            ).then(
+                fn=None,  # JavaScript callback
+                _js='() => clearLocalStorage()'
             )
 
         return demo
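streaming_response consumes the generator returned by get_response, which uses the OpenAI-compatible client.chat.completions.create(..., stream=True) call shown earlier in the file. A self-contained sketch of that streaming loop, assuming huggingface_hub >= 0.22 (where InferenceClient gained this interface) and a valid token in HF_TOKEN:

    import os
    from huggingface_hub import InferenceClient

    client = InferenceClient(model="Qwen/QwQ-32B-Preview", token=os.getenv("HF_TOKEN"))
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        max_tokens=64,
        stream=True,
    )
    full_response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content  # incremental text, as in the diff's loop
        if delta:
            full_response += delta
    print(full_response)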
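On the persistence wiring: each event chains a JavaScript-only step with .then(fn=None, _js=...), the Gradio 3.x spelling for a client-side callback (Gradio 4 renamed the keyword to js, so the correct form depends on the Space's pinned gradio version). The chat history handed to saveToLocalStorage is the Chatbot's list of [user_message, assistant_message] pairs, which survives a JSON round trip unchanged; a small sketch of that invariant in Python:

    import json

    chat_history = [["hi", "Hello!"], ["what's 2+2?", "4"]]  # Chatbot pair format used above
    saved = json.dumps(chat_history)    # what JSON.stringify produces in the browser
    restored = json.loads(saved)        # what loadFromLocalStorage hands back
    assert restored == chat_history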