Reality123b committed on
Commit
2cf74a7
·
verified ·
1 Parent(s): c8714db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +210 -145
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
 
4
 
5
  class XylariaChat:
6
  def __init__(self):
@@ -11,35 +12,18 @@ class XylariaChat:
11
 
12
  # Initialize the inference client
13
  self.client = InferenceClient(
14
- model="Qwen/QwQ-32B-Preview", # Changed model name
15
- token=self.hf_token
16
  )
17
 
18
  # Initialize conversation history and persistent memory
19
  self.conversation_history = []
20
  self.persistent_memory = {}
 
21
 
22
  # System prompt with more detailed instructions
23
- self.system_prompt = """You are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin designed to provide helpful, accurate, and engaging support across a wide range of topics. Key guidelines for our interaction include:
24
- Core Principles:
25
- - Provide accurate and comprehensive assistance
26
- - Maintain a friendly and approachable communication style
27
- - Prioritize the user's needs and context
28
- Communication Style:
29
- - Be conversational and warm
30
- - Use clear, concise language
31
- - Occasionally use light, appropriate emoji to enhance communication
32
- - Adapt communication style to the user's preferences
33
- - Respond in english
34
- Important Notes:
35
- - I am an AI assistant created by an independent developer
36
- - I do not represent OpenAI or any other AI institution
37
- - For image-related queries, I can describe images or provide analysis, or generate or link to images directly
38
- Capabilities:
39
- - Assist with research, writing, analysis, problem-solving, and creative tasks
40
- - Answer questions across various domains
41
- - Provide explanations and insights
42
- - Offer supportive and constructive guidance """
43
 
44
  def store_information(self, key, value):
45
  """Store important information in persistent memory"""
@@ -49,14 +33,54 @@ Capabilities:
49
  """Retrieve information from persistent memory"""
50
  return self.persistent_memory.get(key)
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  def reset_conversation(self):
53
  """
54
- Completely reset the conversation history and persistent memory
55
- This helps prevent exposing previous users' conversations
56
  """
 
57
  self.conversation_history = []
58
- self.persistent_memory = {}
59
- return []
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
  def get_response(self, user_input):
62
  # Prepare messages with conversation context and persistent memory
@@ -75,55 +99,20 @@ Capabilities:
75
 
76
  # Generate response with streaming
77
  try:
78
- response_stream = self.client.text_generation(
79
- prompt=self.messages_to_prompt(messages), # Convert messages to prompt format
80
- max_new_tokens=1024,
81
  temperature=0.5,
 
82
  top_p=0.7,
83
  stream=True
84
  )
85
 
86
- return response_stream
87
 
88
  except Exception as e:
89
  return f"Error generating response: {str(e)}"
90
 
91
- def messages_to_prompt(self, messages):
92
- """
93
- Converts a list of messages in OpenAI format to a prompt string.
94
- """
95
- prompt = ""
96
- for message in messages:
97
- if message["role"] == "system":
98
- prompt += f"<|im_start|>system\n{message['content']}<|im_end|>\n"
99
- elif message["role"] == "user":
100
- prompt += f"<|im_start|>user\n{message['content']}<|im_end|>\n"
101
- elif message["role"] == "assistant":
102
- prompt += f"<|im_start|>assistant\n{message['content']}<|im_end|>\n"
103
- prompt += "<|im_start|>assistant\n"
104
- return prompt
105
-
106
  def create_interface(self):
107
- # Local storage JavaScript functions (these are strings, not functions)
108
- load_from_local_storage_js = """
109
- async () => {
110
- const savedHistory = localStorage.getItem('xylaria_chat_history');
111
- return savedHistory ? JSON.parse(savedHistory) : [];
112
- }
113
- """
114
-
115
- save_to_local_storage_js = """
116
- async (chatHistory) => {
117
- localStorage.setItem('xylaria_chat_history', JSON.stringify(chatHistory));
118
- }
119
- """
120
-
121
- clear_local_storage_js = """
122
- async () => {
123
- localStorage.removeItem('xylaria_chat_history');
124
- }
125
- """
126
-
127
  def streaming_response(message, chat_history):
128
  # Clear input textbox
129
  response_stream = self.get_response(message)
@@ -137,12 +126,14 @@ Capabilities:
137
  updated_history = chat_history + [[message, ""]]
138
 
139
  # Streaming output
140
- for response_text in response_stream:
141
- full_response += response_text
 
 
142
 
143
- # Update the last message in chat history with partial response
144
- updated_history[-1][1] = full_response
145
- yield "", updated_history
146
 
147
  # Update conversation history
148
  self.conversation_history.append(
@@ -156,112 +147,186 @@ Capabilities:
156
  if len(self.conversation_history) > 10:
157
  self.conversation_history = self.conversation_history[-10:]
158
 
159
- return "", updated_history
160
 
161
- # Custom CSS for Inter font
 
 
 
 
 
162
  custom_css = """
163
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
 
164
  body, .gradio-container {
165
  font-family: 'Inter', sans-serif !important;
166
  }
 
167
  .chatbot-container .message {
168
  font-family: 'Inter', sans-serif !important;
169
  }
 
170
  .gradio-container input,
171
  .gradio-container textarea,
172
  .gradio-container button {
173
  font-family: 'Inter', sans-serif !important;
174
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  """
176
 
 
 
 
 
 
 
 
 
 
 
 
177
  with gr.Blocks(theme='soft', css=custom_css) as demo:
178
- # Chat interface with improved styling
179
- with gr.Column():
180
- chatbot = gr.Chatbot(
181
- label="Xylaria 1.4 Senoa",
182
- height=500,
183
- show_copy_button=True,
184
- # type="messages" # Use the 'messages' format
185
- )
186
-
187
- # Input row with improved layout
188
- with gr.Row():
189
- txt = gr.Textbox(
190
- show_label=False,
191
- placeholder="Type your message...",
192
- container=False,
193
- scale=4
194
  )
195
- btn = gr.Button("Send", scale=1)
196
 
197
- # Clear history and memory buttons
198
- clear = gr.Button("Clear Conversation")
199
- clear_memory = gr.Button("Clear Memory")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
 
201
- # Use `gr.State` to manage initial chatbot value and `demo.load` for initialization
202
- initial_chat_history = gr.State([])
 
 
 
 
 
 
 
 
 
203
 
204
- demo.load(
205
- fn=lambda: initial_chat_history.value,
206
- inputs=None,
207
- outputs=[chatbot],
208
- js=load_from_local_storage_js
209
- )
 
 
 
 
 
210
 
211
- # Submit functionality with local storage save
212
- btn.click(
213
- fn=streaming_response,
214
- inputs=[txt, chatbot],
215
- outputs=[txt, chatbot]
216
- ).then(
217
- fn=None,
218
- inputs=[chatbot], # Pass chatbot history to JavaScript
219
- outputs=None,
220
- js=save_to_local_storage_js
221
- )
222
- txt.submit(
223
- fn=streaming_response,
224
- inputs=[txt, chatbot],
225
- outputs=[txt, chatbot]
226
- ).then(
227
- fn=None,
228
- inputs=[chatbot], # Pass chatbot history to JavaScript
229
- outputs=None,
230
- js=save_to_local_storage_js
231
- )
232
 
233
- # Clear conversation history with local storage clear
234
- clear.click(
235
- fn=lambda: [],
236
- inputs=None,
237
- outputs=[chatbot]
238
- ).then(
239
- fn=None,
240
- inputs=None,
241
- outputs=None,
242
- js=clear_local_storage_js
243
- )
244
 
245
- # Clear persistent memory and reset conversation with local storage clear
246
- clear_memory.click(
247
- fn=self.reset_conversation,
248
- inputs=None,
249
- outputs=[chatbot]
250
- ).then(
251
- fn=None,
252
- inputs=None,
253
- outputs=None,
254
- js=clear_local_storage_js
255
- )
256
 
257
- return demo
 
 
 
 
 
 
 
258
 
259
  # Launch the interface
260
  def main():
261
  chat = XylariaChat()
262
  interface = chat.create_interface()
263
  interface.launch(
264
- share=True, # Optional: create a public link
265
  debug=True # Show detailed errors
266
  )
267
 
 
1
  import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
+ import json
5
 
6
  class XylariaChat:
7
  def __init__(self):
 
12
 
13
  # Initialize the inference client
14
  self.client = InferenceClient(
15
+ model="Qwen/Qwen-32B-Preview", # Changed model to Qwen/Qwen-32B-Preview to test streaming
16
+ api_key=self.hf_token
17
  )
18
 
19
  # Initialize conversation history and persistent memory
20
  self.conversation_history = []
21
  self.persistent_memory = {}
22
+ self.chat_file_path = "chat_history.txt" # File to save chats
23
 
24
  # System prompt with more detailed instructions
25
+ self.system_prompt = """You are a helpful and harmless AI assistant you are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin you think step by step
26
+ """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  def store_information(self, key, value):
29
  """Store important information in persistent memory"""
 
33
  """Retrieve information from persistent memory"""
34
  return self.persistent_memory.get(key)
35
 
36
def save_chat(self):
    """Persist the conversation history and persistent memory to disk.

    The state is written as a single JSON document to
    ``self.chat_file_path``. Saving is best-effort: any failure is
    printed and swallowed so it never interrupts the chat flow.
    """
    # Bundle both pieces of state into one JSON-serializable document.
    chat_data = {
        "conversation_history": self.conversation_history,
        "persistent_memory": self.persistent_memory,
    }
    try:
        # Explicit UTF-8 so non-ASCII chat content round-trips on any
        # platform regardless of the locale's default encoding.
        with open(self.chat_file_path, "w", encoding="utf-8") as f:
            json.dump(chat_data, f)
    except Exception as e:
        # Best-effort: report but do not crash the UI on a failed save.
        print(f"Error saving chat history: {e}")
47
+
48
def load_chat(self):
    """Load conversation history and persistent memory from disk.

    Reads the JSON document written by ``save_chat`` from
    ``self.chat_file_path``.

    Returns:
        tuple: ``(conversation_history, persistent_memory)``. On any
        failure (missing or corrupt file) both are empty, and the
        instance attributes are reset to match the returned values so
        state never goes inconsistent.
    """
    try:
        # Explicit UTF-8 to mirror save_chat and avoid locale-dependent reads.
        with open(self.chat_file_path, "r", encoding="utf-8") as f:
            chat_data = json.load(f)
        self.conversation_history = chat_data.get("conversation_history", [])
        self.persistent_memory = chat_data.get("persistent_memory", {})
    except FileNotFoundError:
        # First run (or cleared history): start fresh rather than failing.
        print("Chat history file not found.")
        self.conversation_history = []
        self.persistent_memory = {}
    except Exception as e:
        # Corrupt/unreadable file: log and fall back to an empty state.
        print(f"Error loading chat history: {e}")
        self.conversation_history = []
        self.persistent_memory = {}
    return self.conversation_history, self.persistent_memory
62
+
63
def reset_conversation(self):
    """Wipe all conversation state, local and (best-effort) API-side.

    Recreates the ``InferenceClient`` so no server-side context
    survives, clears the in-memory history and persistent memory,
    persists the now-empty state, and returns ``None`` so the chatbot
    component in the UI is cleared.
    """
    # Rebuild the inference client first: a fresh client carries no
    # prior session or context on the API side.
    try:
        self.client = InferenceClient(
            model="Qwen/Qwen-32B-Preview",
            api_key=self.hf_token,
        )
    except Exception as e:
        print(f"Error resetting API client: {e}")

    # Drop everything held locally and persist the empty history.
    self.conversation_history = []
    self.persistent_memory.clear()
    self.save_chat()

    # None is what gr.Chatbot expects in order to blank the display.
    return None
84
 
85
  def get_response(self, user_input):
86
  # Prepare messages with conversation context and persistent memory
 
99
 
100
  # Generate response with streaming
101
  try:
102
+ stream = self.client.chat.completions.create(
103
+ messages=messages,
 
104
  temperature=0.5,
105
+ max_tokens=10240,
106
  top_p=0.7,
107
  stream=True
108
  )
109
 
110
+ return stream
111
 
112
  except Exception as e:
113
  return f"Error generating response: {str(e)}"
114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  def create_interface(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  def streaming_response(message, chat_history):
117
  # Clear input textbox
118
  response_stream = self.get_response(message)
 
126
  updated_history = chat_history + [[message, ""]]
127
 
128
  # Streaming output
129
+ for chunk in response_stream:
130
+ if chunk.choices[0].delta.content:
131
+ chunk_content = chunk.choices[0].delta.content
132
+ full_response += chunk_content
133
 
134
+ # Update the last message in chat history with partial response
135
+ updated_history[-1][1] = full_response
136
+ yield "", updated_history
137
 
138
  # Update conversation history
139
  self.conversation_history.append(
 
147
  if len(self.conversation_history) > 10:
148
  self.conversation_history = self.conversation_history[-10:]
149
 
150
+ self.save_chat()
151
 
152
+ def load_chat_interface():
153
+ """Loads the chat history into the chatbot interface."""
154
+ self.load_chat()
155
+ return self.conversation_history
156
+
157
+ # Custom CSS for Inter font and sidebar
158
  custom_css = """
159
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
160
+
161
  body, .gradio-container {
162
  font-family: 'Inter', sans-serif !important;
163
  }
164
+
165
  .chatbot-container .message {
166
  font-family: 'Inter', sans-serif !important;
167
  }
168
+
169
  .gradio-container input,
170
  .gradio-container textarea,
171
  .gradio-container button {
172
  font-family: 'Inter', sans-serif !important;
173
  }
174
+
175
+ /* Sidebar styling */
176
+ #sidebar {
177
+ background-color: #f2f2f2;
178
+ border-right: 1px solid #ccc;
179
+ padding: 10px;
180
+ height: 100vh;
181
+ overflow-y: auto;
182
+ }
183
+
184
+ #sidebar ul {
185
+ list-style-type: none;
186
+ padding: 0;
187
+ }
188
+
189
+ #sidebar li {
190
+ margin-bottom: 5px;
191
+ }
192
+ /* Main chat area */
193
+ #main-chat {
194
+ padding: 20px;
195
+ }
196
  """
197
 
198
+ # Example prompts
199
+ example_prompts = [
200
+ "How do I get started with coding?",
201
+ "Tell me a fun fact about science.",
202
+ "What are some good books to read?"
203
+ ]
204
+
205
+ # Function to forward prompt to the textbox
206
+ def forward_prompt(prompt):
207
+ return prompt
208
+
209
  with gr.Blocks(theme='soft', css=custom_css) as demo:
210
+ with gr.Row():
211
+ # Sidebar for displaying chat history
212
+ with gr.Column(elem_id="sidebar", scale=1):
213
+ gr.Markdown("### Chat History")
214
+ load_button = gr.Button("Load Chat History")
215
+ chat_list = gr.Markdown("No chat history found.")
216
+
217
+ load_button.click(
218
+ fn=lambda: gr.Markdown.update(value=self.format_chat_history()),
219
+ inputs=None,
220
+ outputs=[chat_list]
 
 
 
 
 
221
  )
 
222
 
223
+ # Main chat interface
224
+ with gr.Column(elem_id="main-chat", scale=3):
225
+ # Input row with improved layout (moved txt outside of conditional columns)
226
+ with gr.Row():
227
+ txt = gr.Textbox(
228
+ show_label=False,
229
+ placeholder="Type your message...",
230
+ container=False,
231
+ scale=4
232
+ )
233
+ btn = gr.Button("Send", scale=1)
234
+ # Show Xylaria and example prompts only on the first page/new chat
235
+ with gr.Column(visible=True) as start_page:
236
+ gr.Markdown("# Xylaria")
237
+ with gr.Row():
238
+ for prompt in example_prompts:
239
+ gr.Button(prompt).click(
240
+ fn=forward_prompt,
241
+ inputs=gr.State(prompt),
242
+ outputs=txt
243
+ )
244
+
245
+ with gr.Column(visible=False) as chat_page:
246
+ chatbot = gr.Chatbot(
247
+ label="Xylaria 1.4 Senoa",
248
+ height=500,
249
+ show_copy_button=True
250
+ )
251
+
252
+ # Clear history and memory buttons
253
+ clear = gr.Button("Clear Conversation")
254
+ clear_memory = gr.Button("Clear Memory")
255
+
256
+ # Toggle between start page and chat page
257
+ def toggle_page(choice):
258
+ return gr.Column.update(visible=choice == "chat"), gr.Column.update(visible=choice == "start")
259
+
260
+ # Submit functionality with streaming
261
+ btn.click(
262
+ fn=streaming_response,
263
+ inputs=[txt, chatbot],
264
+ outputs=[txt, chatbot]
265
+ ).then(
266
+ fn=lambda: toggle_page("chat"),
267
+ inputs=gr.State("chat"),
268
+ outputs=[chat_page, start_page]
269
+ )
270
+ txt.submit(
271
+ fn=streaming_response,
272
+ inputs=[txt, chatbot],
273
+ outputs=[txt, chatbot]
274
+ ).then(
275
+ fn=lambda: toggle_page("chat"),
276
+ inputs=gr.State("chat"),
277
+ outputs=[chat_page, start_page]
278
+ )
279
 
280
+ # Clear conversation history
281
+ clear.click(
282
+ fn=lambda: None,
283
+ inputs=None,
284
+ outputs=[chatbot],
285
+ queue=False
286
+ ).then(
287
+ fn=lambda: toggle_page("start"),
288
+ inputs=gr.State("start"),
289
+ outputs=[chat_page, start_page]
290
+ )
291
 
292
+ # Clear persistent memory and reset conversation
293
+ clear_memory.click(
294
+ fn=self.reset_conversation,
295
+ inputs=None,
296
+ outputs=[chatbot],
297
+ queue=False
298
+ ).then(
299
+ fn=lambda: toggle_page("start"),
300
+ inputs=gr.State("start"),
301
+ outputs=[chat_page, start_page]
302
+ )
303
 
304
+ # Ensure memory is cleared when the interface is closed
305
+ demo.load(self.reset_conversation, None, None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306
 
307
+ return demo
 
 
 
 
 
 
 
 
 
 
308
 
309
def format_chat_history(self):
    """Render the saved chat history as Markdown for the sidebar.

    Reloads state from disk first. Returns a placeholder string when
    there is no history; otherwise alternating **You:** / **Xylaria:**
    paragraphs, one per message. Messages with any other role are
    skipped.
    """
    # Refresh self.conversation_history from the saved file.
    self.load_chat()
    if not self.conversation_history:
        return "No chat history found."

    # Collect paragraphs and join once, instead of string concatenation.
    parts = []
    for message in self.conversation_history:
        role = message["role"]
        if role == "user":
            parts.append(f"**You:** {message['content']}\n\n")
        elif role == "assistant":
            parts.append(f"**Xylaria:** {message['content']}\n\n")
    return "".join(parts)
323
 
324
# Launch the interface
def main():
    """Entry point: build the Xylaria chat app and start the Gradio server."""
    app = XylariaChat()
    demo = app.create_interface()
    # share=False keeps the app local-only; debug=True surfaces detailed
    # errors/tracebacks while developing.
    demo.launch(share=False, debug=True)
332