Reality123b committed
Commit be67c98 · verified · 1 Parent(s): 6ab3b21

Update app.py

Files changed (1)
  app.py +45 -35
app.py CHANGED
@@ -8,17 +8,17 @@ class XylariaChat:
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")
-
+
        # Initialize the inference client
        self.client = InferenceClient(
-            model="Qwen/QwQ-32B-Preview",
+            model="Qwen/QwQ-32B-Preview",
            api_key=self.hf_token
        )
-
+
        # Initialize conversation history and persistent memory
        self.conversation_history = []
        self.persistent_memory = {}
-
+
        # System prompt with more detailed instructions
        self.system_prompt = """You are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin designed to provide helpful, accurate, and engaging support across a wide range of topics. Key guidelines for our interaction include:
        Core Principles:
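
For orientation before the next hunk: a minimal, self-contained sketch of the client setup this change touches. It assumes only that huggingface_hub is installed and HF_TOKEN is exported; the model ID and the api_key parameter are taken directly from the diff:

    import os
    from huggingface_hub import InferenceClient

    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("HuggingFace token not found in environment variables")

    # Same pinned model and auth as in the hunk above
    client = InferenceClient(
        model="Qwen/QwQ-32B-Preview",
        api_key=hf_token,
    )
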
@@ -64,14 +64,14 @@ Capabilities:
            *self.conversation_history,
            {"role": "user", "content": user_input}
        ]
-
+
        # Add persistent memory context if available
        if self.persistent_memory:
            memory_context = "Remembered Information:\n" + "\n".join(
                [f"{k}: {v}" for k, v in self.persistent_memory.items()]
            )
            messages.insert(1, {"role": "system", "content": memory_context})
-
+
        # Generate response with streaming
        try:
            stream = self.client.chat.completions.create(
@@ -81,9 +81,9 @@ Capabilities:
                top_p=0.7,
                stream=True
            )
-
+
            return stream
-
+
        except Exception as e:
            return f"Error generating response: {str(e)}"
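
After this hunk, get_response() returns either an error string or a live stream. A hedged sketch of consuming such a stream outside Gradio — only top_p=0.7 and stream=True appear in the hunk; the messages and max_tokens values here are illustrative assumptions. The chunk shape (choices[0].delta.content) is the OpenAI-compatible streaming format the code relies on:

    # `client` is the InferenceClient from the earlier sketch.
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": "Hello!"}],  # illustrative
        max_tokens=256,  # assumption: defined outside this hunk
        top_p=0.7,       # from the diff
        stream=True,     # from the diff
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
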
@@ -107,25 +107,25 @@ Capabilities:
        def streaming_response(message, chat_history):
            # Clear input textbox
            response_stream = self.get_response(message)
-
+
            # If it's an error, return immediately
            if isinstance(response_stream, str):
                return "", chat_history + [[message, response_stream]]
-
+
            # Prepare for streaming response
            full_response = ""
            updated_history = chat_history + [[message, ""]]
-
+
            # Streaming output
            for chunk in response_stream:
                if chunk.choices[0].delta.content:
                    chunk_content = chunk.choices[0].delta.content
                    full_response += chunk_content
-
+
                    # Update the last message in chat history with partial response
                    updated_history[-1][1] = full_response
                    yield "", updated_history
-
+
            # Update conversation history
            self.conversation_history.append(
                {"role": "user", "content": message}
@@ -133,25 +133,25 @@ Capabilities:
            self.conversation_history.append(
                {"role": "assistant", "content": full_response}
            )
-
+
            # Limit conversation history to prevent token overflow
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]
-
+
            return "", updated_history

        # Custom CSS for Inter font
        custom_css = """
        @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
-
+
        body, .gradio-container {
            font-family: 'Inter', sans-serif !important;
        }
-
+
        .chatbot-container .message {
            font-family: 'Inter', sans-serif !important;
        }
-
+
        .gradio-container input,
        .gradio-container textarea,
        .gradio-container button {
@@ -166,61 +166,71 @@ Capabilities:
            label="Xylaria 1.4 Senoa",
            height=500,
            show_copy_button=True,
-            value=lambda: gr.load("json", "loadFromLocalStorage()")
+            # value=lambda: gr.load("json", "loadFromLocalStorage()") # Remove this line
        )
-
+
        # Input row with improved layout
        with gr.Row():
            txt = gr.Textbox(
-                show_label=False,
-                placeholder="Type your message...",
+                show_label=False,
+                placeholder="Type your message...",
                container=False,
                scale=4
            )
            btn = gr.Button("Send", scale=1)
-
+
        # Clear history and memory buttons
        clear = gr.Button("Clear Conversation")
        clear_memory = gr.Button("Clear Memory")
-
+
+        # Load chat history from local storage on page load
+        demo.load(
+            fn=None, # No Python function needed, just JavaScript
+            _js="() => loadFromLocalStorage()" # Call your JavaScript function
+        ).then(
+            fn=lambda x: x,
+            inputs=chatbot, # Use chatbot as input to get the loaded value
+            outputs=chatbot
+        )
+
        # Submit functionality with local storage save
        btn.click(
-            fn=streaming_response,
-            inputs=[txt, chatbot],
+            fn=streaming_response,
+            inputs=[txt, chatbot],
            outputs=[txt, chatbot]
        ).then(
            fn=None, # JavaScript callback
            _js='(chatHistory) => saveToLocalStorage(chatHistory)'
        )
        txt.submit(
-            fn=streaming_response,
-            inputs=[txt, chatbot],
+            fn=streaming_response,
+            inputs=[txt, chatbot],
            outputs=[txt, chatbot]
        ).then(
            fn=None, # JavaScript callback
            _js='(chatHistory) => saveToLocalStorage(chatHistory)'
        )
-
+
        # Clear conversation history with local storage clear
        clear.click(
-            fn=lambda: [],
-            inputs=None,
+            fn=lambda: [],
+            inputs=None,
            outputs=[chatbot]
        ).then(
            fn=None, # JavaScript callback
            _js='() => clearLocalStorage()'
        )
-
+
        # Clear persistent memory and reset conversation with local storage clear
        clear_memory.click(
-            fn=lambda: [],
-            inputs=None,
+            fn=lambda: [],
+            inputs=None,
            outputs=[chatbot]
        ).then(
            fn=None, # JavaScript callback
            _js='() => clearLocalStorage()'
        )
-
+
        return demo

    # Launch the interface
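
The _js callbacks wired above call three JavaScript helpers — loadFromLocalStorage, saveToLocalStorage, and clearLocalStorage — that this commit never defines, so they presumably live elsewhere in the Space. As a hedged sketch only, here is one plausible shape for them, kept in a Python string so it could be injected into the page; the 'xylaria_chat' storage key and every implementation detail are assumptions, not part of this commit:

    # Hypothetical helpers matching the _js calls in the diff above.
    CHAT_STORAGE_JS = """
    function saveToLocalStorage(chatHistory) {
        // Persist the Gradio chat history (a list of [user, bot] pairs)
        localStorage.setItem('xylaria_chat', JSON.stringify(chatHistory));
        return chatHistory;
    }
    function loadFromLocalStorage() {
        const saved = localStorage.getItem('xylaria_chat');
        return saved ? JSON.parse(saved) : [];
    }
    function clearLocalStorage() {
        localStorage.removeItem('xylaria_chat');
        return [];
    }
    """

The design change itself is visible in the diff: instead of seeding the Chatbot's value at construction time with gr.load(...) (the removed line), the commit restores history after the page loads via a demo.load(...).then(...) chain, and saves or clears it through the _js hooks attached to each event.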
 