Reality123b committed
Commit 55168fa · verified · 1 Parent(s): 2cf74a7

Update app.py

Files changed (1)
  1. app.py +96 -78
app.py CHANGED
@@ -12,7 +12,7 @@ class XylariaChat:
 
         # Initialize the inference client
         self.client = InferenceClient(
-            model="Qwen/Qwen-32B-Preview", # Changed model to Qwen/Qwen-32B-Preview to test streaming
+            model="Qwen/Qwen-32B-Preview",
             api_key=self.hf_token
         )
 
@@ -21,7 +21,7 @@ class XylariaChat:
         self.persistent_memory = {}
         self.chat_file_path = "chat_history.txt" # File to save chats
 
-        # System prompt with more detailed instructions
+        # System prompt
         self.system_prompt = """You are a helpful and harmless AI assistant you are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin you think step by step
         """
 
@@ -114,28 +114,21 @@ class XylariaChat:
 
     def create_interface(self):
         def streaming_response(message, chat_history):
-            # Clear input textbox
             response_stream = self.get_response(message)
 
-            # If it's an error, return immediately
             if isinstance(response_stream, str):
                 return "", chat_history + [[message, response_stream]]
 
-            # Prepare for streaming response
             full_response = ""
             updated_history = chat_history + [[message, ""]]
 
-            # Streaming output
             for chunk in response_stream:
                 if chunk.choices[0].delta.content:
                     chunk_content = chunk.choices[0].delta.content
                     full_response += chunk_content
-
-                    # Update the last message in chat history with partial response
                     updated_history[-1][1] = full_response
                     yield "", updated_history
 
-            # Update conversation history
             self.conversation_history.append(
                 {"role": "user", "content": message}
             )
@@ -143,33 +136,44 @@ class XylariaChat:
                 {"role": "assistant", "content": full_response}
             )
 
-            # Limit conversation history to prevent token overflow
             if len(self.conversation_history) > 10:
                 self.conversation_history = self.conversation_history[-10:]
 
-            self.save_chat()
+            self.save_chat() # Save after each interaction
 
         def load_chat_interface():
-            """Loads the chat history into the chatbot interface."""
             self.load_chat()
             return self.conversation_history
 
-        # Custom CSS for Inter font and sidebar
+        # Custom CSS for improved colors and styling
         custom_css = """
         @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
 
         body, .gradio-container {
             font-family: 'Inter', sans-serif !important;
+            background-color: #f8f8f8; /* Light background */
         }
-
+
+        /* Chatbot styling */
        .chatbot-container .message {
             font-family: 'Inter', sans-serif !important;
+            padding: 10px 15px;
+            border-radius: 10px;
+            margin-bottom: 8px; /* Add margin between messages */
        }
-
-        .gradio-container input,
-        .gradio-container textarea,
-        .gradio-container button {
-            font-family: 'Inter', sans-serif !important;
+
+        .chatbot-container .user {
+            background-color: #e0f2f7; /* Light blue for user messages */
+            border: 1px solid #a7d9ed; /* Light blue border */
+        }
+
+        .chatbot-container .assistant {
+            background-color: #f0f0f0; /* Light gray for assistant messages */
+            border: 1px solid #d3d3d3; /* Light gray border */
+        }
+
+        .chatbot-container .message-tools {
+            margin-right: 10px; /* Add some space between text and buttons */
        }
 
         /* Sidebar styling */
@@ -181,18 +185,28 @@ class XylariaChat:
             overflow-y: auto;
         }
 
-        #sidebar ul {
-            list-style-type: none;
-            padding: 0;
-        }
-
-        #sidebar li {
-            margin-bottom: 5px;
-        }
         /* Main chat area */
         #main-chat {
             padding: 20px;
         }
+
+        /* Textbox and buttons */
+        .gradio-container input,
+        .gradio-container textarea,
+        .gradio-container button {
+            font-family: 'Inter', sans-serif !important;
+            border-radius: 5px; /* Rounded corners */
+        }
+
+        .gradio-container button {
+            background-color: #4CAF50; /* Green button */
+            color: white;
+            transition: background-color 0.2s; /* Smooth transition for hover effect */
+        }
+
+        .gradio-container button:hover {
+            background-color: #3e8e41; /* Darker green on hover */
+        }
        """
 
         # Example prompts
@@ -222,7 +236,7 @@ class XylariaChat:
 
            # Main chat interface
            with gr.Column(elem_id="main-chat", scale=3):
-                # Input row with improved layout (moved txt outside of conditional columns)
+                # Input row (stays visible)
                with gr.Row():
                    txt = gr.Textbox(
                        show_label=False,
@@ -231,7 +245,8 @@ class XylariaChat:
                        scale=4
                    )
                    btn = gr.Button("Send", scale=1)
-                # Show Xylaria and example prompts only on the first page/new chat
+
+                # Xylaria welcome and example prompts (initially visible)
                with gr.Column(visible=True) as start_page:
                    gr.Markdown("# Xylaria")
                    with gr.Row():
@@ -242,67 +257,70 @@ class XylariaChat:
                            outputs=txt
                        )
 
+                # Chat interface (initially hidden)
                with gr.Column(visible=False) as chat_page:
                    chatbot = gr.Chatbot(
                        label="Xylaria 1.4 Senoa",
                        height=500,
-                        show_copy_button=True
+                        show_copy_button=True,
+                        avatar_images=("user.png", "xylaria.png"), # Replace with your image paths
+                        bubble_full_width=False
                    )
 
                    # Clear history and memory buttons
                    clear = gr.Button("Clear Conversation")
                    clear_memory = gr.Button("Clear Memory")
 
-                # Toggle between start page and chat page
-                def toggle_page(choice):
-                    return gr.Column.update(visible=choice == "chat"), gr.Column.update(visible=choice == "start")
-
-                # Submit functionality with streaming
-                btn.click(
-                    fn=streaming_response,
-                    inputs=[txt, chatbot],
-                    outputs=[txt, chatbot]
-                ).then(
-                    fn=lambda: toggle_page("chat"),
-                    inputs=gr.State("chat"),
-                    outputs=[chat_page, start_page]
-                )
-                txt.submit(
-                    fn=streaming_response,
-                    inputs=[txt, chatbot],
-                    outputs=[txt, chatbot]
-                ).then(
-                    fn=lambda: toggle_page("chat"),
-                    inputs=gr.State("chat"),
-                    outputs=[chat_page, start_page]
-                )
+                # Toggle between start and chat pages
+                def toggle_page(choice):
+                    return gr.Column.update(visible=choice == "chat"), gr.Column.update(visible=choice == "start")
+
+                # Submit prompt
+                btn.click(
+                    fn=streaming_response,
+                    inputs=[txt, chatbot],
+                    outputs=[txt, chatbot]
+                ).then(
+                    fn=lambda: toggle_page("chat"),
+                    inputs=gr.State("chat"),
+                    outputs=[chat_page, start_page]
+                )
+                txt.submit(
+                    fn=streaming_response,
+                    inputs=[txt, chatbot],
+                    outputs=[txt, chatbot]
+                ).then(
+                    fn=lambda: toggle_page("chat"),
+                    inputs=gr.State("chat"),
+                    outputs=[chat_page, start_page]
+                )
 
-                # Clear conversation history
-                clear.click(
-                    fn=lambda: None,
-                    inputs=None,
-                    outputs=[chatbot],
-                    queue=False
-                ).then(
-                    fn=lambda: toggle_page("start"),
-                    inputs=gr.State("start"),
-                    outputs=[chat_page, start_page]
-                )
+                # Clear conversation
+                clear.click(
+                    fn=lambda: None,
+                    inputs=None,
+                    outputs=[chatbot],
+                    queue=False
+                ).then(
+                    fn=lambda: toggle_page("start"),
+                    inputs=gr.State("start"),
+                    outputs=[chat_page, start_page]
+                )
 
-                # Clear persistent memory and reset conversation
-                clear_memory.click(
-                    fn=self.reset_conversation,
-                    inputs=None,
-                    outputs=[chatbot],
-                    queue=False
-                ).then(
-                    fn=lambda: toggle_page("start"),
-                    inputs=gr.State("start"),
-                    outputs=[chat_page, start_page]
-                )
+                # Clear memory
+                clear_memory.click(
+                    fn=self.reset_conversation,
+                    inputs=None,
+                    outputs=[chatbot],
+                    queue=False
+                ).then(
+                    fn=lambda: toggle_page("start"),
+                    inputs=gr.State("start"),
+                    outputs=[chat_page, start_page]
+                )
 
-                # Ensure memory is cleared when the interface is closed
-                demo.load(self.reset_conversation, None, None)
+                # Load chat history on interface load
+                demo.load(self.reset_conversation, None, None)
 
        return demo
 
@@ -326,7 +344,7 @@ def main():
    chat = XylariaChat()
    interface = chat.create_interface()
    interface.launch(
-        share=False, # Optional: create a public link
+        share=False, # Set to True to create a public link
        debug=True # Show detailed errors
    )
 
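
Note on the page-toggle wiring above: the sketch below is a minimal, standalone illustration (not part of the commit) of the same start-page/chat-page pattern chained with .then(). It assumes a Gradio version that provides the generic gr.update(visible=...) helper (the committed code uses gr.Column.update(...), the Gradio 3.x spelling); all component names and handlers here are placeholders.

# Minimal sketch (illustrative names only): toggling between a start column and a
# chat column with .then(), as the commit does after btn.click / txt.submit.
import gradio as gr

def toggle_page(choice):
    # Return visibility updates for (chat_page, start_page), in that order.
    return gr.update(visible=choice == "chat"), gr.update(visible=choice == "start")

with gr.Blocks() as demo:
    with gr.Column(visible=True) as start_page:
        gr.Markdown("# Start page")       # stand-in for the Xylaria welcome page
    with gr.Column(visible=False) as chat_page:
        gr.Markdown("Chat goes here")     # stand-in for the gr.Chatbot column
    txt = gr.Textbox(show_label=False, placeholder="Type a message")
    btn = gr.Button("Send")

    # Run the main handler first, then flip pages -- same chaining as the commit.
    btn.click(fn=lambda message: "", inputs=txt, outputs=txt).then(
        fn=lambda: toggle_page("chat"),
        inputs=None,
        outputs=[chat_page, start_page],
    )

if __name__ == "__main__":
    demo.launch()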