CCockrum committed
Commit de91506 · verified · 1 Parent(s): 5fb66c1

Update app.py

Files changed (1)
  1. app.py +170 -114
app.py CHANGED
@@ -27,9 +27,9 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
 # ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
-    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-if "listening" not in st.session_state:
-    st.session_state.listening = False
 
 # ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
 def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
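Only the signature of get_llm_hf_inference appears as context in this hunk; its body is not part of the change. As orientation, here is a minimal sketch of what a helper with this signature might look like, assuming it wraps the Hugging Face Inference API via huggingface_hub.InferenceClient (the actual implementation in app.py may differ):

```python
# Hypothetical sketch -- the real body of get_llm_hf_inference is not shown in this diff.
from huggingface_hub import InferenceClient

def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf",
                         max_new_tokens=800, temperature=0.3):
    """Return a callable that sends a prompt to the Inference API for model_id."""
    client = InferenceClient(model=model_id)

    def generate(prompt: str) -> str:
        # text_generation() performs a plain text-completion request.
        return client.text_generation(
            prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
        )

    return generate
```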
@@ -105,19 +105,6 @@ st.markdown("""
     .user-msg { background-color: #696969; color: white; }
     .assistant-msg { background-color: #333333; color: white; }
     .container { display: flex; flex-direction: column; align-items: flex-start; }
-    .speech-button {
-        background-color: #4CAF50;
-        border: none;
-        color: white;
-        padding: 10px 15px;
-        text-align: center;
-        text-decoration: none;
-        display: inline-block;
-        font-size: 16px;
-        margin: 4px 2px;
-        cursor: pointer;
-        border-radius: 12px;
-    }
     .speak-button {
         background-color: #2196F3;
         border: none;
@@ -131,50 +118,127 @@ st.markdown("""
         cursor: pointer;
         border-radius: 12px;
     }
     @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
     </style>
 
     <script>
-    // Speech Recognition Setup
     let recognition;
     let isListening = false;
 
     function setupSpeechRecognition() {
         try {
            window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
            recognition = new SpeechRecognition();
            recognition.lang = 'en-US';
-           recognition.interimResults = false;
-           recognition.maxAlternatives = 1;
 
            recognition.onresult = function(event) {
-               const speechResult = event.results[0][0].transcript;
-               document.getElementById('speech-result').value = speechResult;
-               document.getElementById('submit-speech').click();
            };
 
            recognition.onerror = function(event) {
                console.error('Speech recognition error:', event.error);
-               isListening = false;
-               updateMicButton();
            };
 
            recognition.onend = function() {
-               isListening = false;
-               updateMicButton();
            };
 
            return true;
         } catch (error) {
            console.error('Speech recognition not supported:', error);
            return false;
         }
     }
 
-    function toggleSpeechRecognition() {
        if (!recognition) {
            if (!setupSpeechRecognition()) {
-               alert('Speech recognition is not supported in your browser.');
               return;
            }
        }
@@ -182,22 +246,31 @@ st.markdown("""
        if (isListening) {
            recognition.stop();
            isListening = false;
        } else {
-           recognition.start();
-           isListening = true;
        }
-
-       updateMicButton();
     }
 
-    function updateMicButton() {
-        const micButton = document.getElementById('mic-button');
-        if (micButton) {
-            micButton.textContent = isListening ? '🛑 Stop Listening' : '🎤 Start Voice Input';
-            micButton.style.backgroundColor = isListening ? '#f44336' : '#4CAF50';
         }
     }
 
     // Text-to-Speech functionality
     function speakText(text) {
         const utterance = new SpeechSynthesisUtterance(text);
@@ -207,88 +280,71 @@ st.markdown("""
         window.speechSynthesis.speak(utterance);
     }
 
     // Initialize after the page loads
     document.addEventListener('DOMContentLoaded', function() {
         setupSpeechRecognition();
     });
-    </script>
-    """, unsafe_allow_html=True)
-
-# Add voice control components
-col1, col2 = st.columns([4, 1])
-with col1:
-    user_input = st.chat_input("Type your message here...")
-
-with col2:
-    st.markdown("""
-        <button id="mic-button" onclick="toggleSpeechRecognition()" class="speech-button">
-            🎤 Start Voice Input
-        </button>
-        <input type="hidden" id="speech-result">
-        <button id="submit-speech" style="display:none;"></button>
-    """, unsafe_allow_html=True)
-
-# Handle form for speech input (hidden)
-speech_result = st.text_input("Speech Result", key="speech_input", label_visibility="collapsed")
-if speech_result:
-    user_input = speech_result
-    # Reset the speech input
-    st.session_state.speech_input = ""
-
-if user_input:
-    # Get response and update chat history
-    response, st.session_state.chat_history = get_response(
-        system_message="You are a helpful AI assistant specializing in NASA and space information.",
-        user_text=user_input,
-        chat_history=st.session_state.chat_history
-    )
-
-# ✅ Display chat history with speak buttons
-st.markdown("<div class='container'>", unsafe_allow_html=True)
-for i, message in enumerate(st.session_state.chat_history):
-    if message["role"] == "user":
-        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
-    else:
-        # Fix: Use JavaScript data attribute instead of trying to escape in f-string
-        content_for_id = f"msg-{i}"
-        st.markdown(
-            f"""<div class='assistant-msg'>
-            <strong>HAL:</strong> {message['content']}
-            <button onclick="speakText(document.getElementById('{content_for_id}').textContent)" class="speak-button">
-                🔊 Speak
-            </button>
-            <span id="{content_for_id}" style="display:none">{message['content']}</span>
-            </div>""",
-            unsafe_allow_html=True
-        )
-st.markdown("</div>", unsafe_allow_html=True)
-
-# Add JavaScript event listener for the submit button
-components_js = """
-<script>
-document.getElementById('submit-speech').addEventListener('click', function() {
-    const speechResult = document.getElementById('speech-result').value;
-    if (speechResult) {
-        // Update the Streamlit text input with the speech result
-        const textInputs = document.querySelectorAll('input[type="text"]');
-        if (textInputs.length > 0) {
-            const lastInput = textInputs[0];
-            lastInput.value = speechResult;
-            lastInput.dispatchEvent(new Event('input', { bubbles: true }));
-
-            // Find and click the submit button
-            setTimeout(() => {
-                const buttons = document.querySelectorAll('button[kind="primaryForm"]');
-                for (const button of buttons) {
-                    if (button.textContent.includes('Submit')) {
-                        button.click();
-                        break;
                     }
                 }
-            }, 100);
         }
     }
-});
-</script>
-"""
-st.components.v1.html(components_js, height=0)
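The removed block above calls get_response(system_message=..., user_text=..., chat_history=...), but get_response itself is not part of this diff. A self-contained sketch of how such a helper is commonly written for a chat app like this one (names, prompt format, and the use of InferenceClient are assumptions, not the repository's actual code):

```python
# Hypothetical sketch -- get_response is defined elsewhere in app.py and is not shown here.
from huggingface_hub import InferenceClient

def get_response(system_message, user_text, chat_history,
                 model_id="meta-llama/Llama-2-7b-chat-hf"):
    """Build a prompt from the running history, query the model, return (reply, new_history)."""
    client = InferenceClient(model=model_id)
    history_text = "\n".join(f"{m['role']}: {m['content']}" for m in chat_history)
    prompt = f"{system_message}\n{history_text}\nuser: {user_text}\nassistant:"
    reply = client.text_generation(prompt, max_new_tokens=800, temperature=0.3).strip()
    # The caller unpacks the result exactly this way:
    # response, st.session_state.chat_history = get_response(...)
    new_history = chat_history + [
        {"role": "user", "content": user_text},
        {"role": "assistant", "content": reply},
    ]
    return reply, new_history
```

The additions side of the same hunks follows below.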
@@ -27,9 +27,9 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
 # ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
+    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! I'm HAL, your NASA AI Assistant. You can speak to me directly or type your questions. How can I help you today?"}]
+if "auto_speak" not in st.session_state:
+    st.session_state.auto_speak = True
 
 # ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
 def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
@@ -105,19 +105,6 @@ st.markdown("""
     .user-msg { background-color: #696969; color: white; }
     .assistant-msg { background-color: #333333; color: white; }
     .container { display: flex; flex-direction: column; align-items: flex-start; }
     .speak-button {
         background-color: #2196F3;
         border: none;
@@ -131,50 +118,127 @@ st.markdown("""
         cursor: pointer;
         border-radius: 12px;
     }
+    .voice-indicator {
+        display: inline-block;
+        width: 12px;
+        height: 12px;
+        border-radius: 50%;
+        margin-left: 8px;
+        vertical-align: middle;
+        background-color: #ccc;
+    }
+    .voice-indicator.active {
+        background-color: #4CAF50;
+        animation: pulse 1.5s infinite;
+    }
+    .status-bar {
+        padding: 6px 12px;
+        border-radius: 5px;
+        background-color: #f1f1f1;
+        display: flex;
+        align-items: center;
+        margin-bottom: 10px;
+        font-size: 14px;
+    }
+    @keyframes pulse {
+        0% { opacity: 1; }
+        50% { opacity: 0.5; }
+        100% { opacity: 1; }
+    }
     @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
     </style>
 
     <script>
+    // Speech Recognition Setup with continuous mode
     let recognition;
     let isListening = false;
+    let silenceTimer;
+    let lastSpeechTime = Date.now();
+    let lastTranscript = '';
+    const SILENCE_THRESHOLD = 3000; // Submit after 3 seconds of silence
 
     function setupSpeechRecognition() {
         try {
            window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
            recognition = new SpeechRecognition();
            recognition.lang = 'en-US';
+           recognition.interimResults = true;
+           recognition.continuous = true;
+
+           recognition.onstart = function() {
+               isListening = true;
+               document.getElementById('voice-indicator').classList.add('active');
+               document.getElementById('voice-status').textContent = 'Listening...';
+           };
 
            recognition.onresult = function(event) {
+               lastSpeechTime = Date.now();
+               clearTimeout(silenceTimer);
+
+               // Get the latest transcript
+               let interimTranscript = '';
+               let finalTranscript = '';
+
+               for (let i = event.resultIndex; i < event.results.length; i++) {
+                   const transcript = event.results[i][0].transcript;
+                   if (event.results[i].isFinal) {
+                       finalTranscript += transcript + ' ';
+                   } else {
+                       interimTranscript += transcript;
+                   }
+               }
+
+               // Update the hidden input with the latest transcript
+               const speechResult = (finalTranscript || interimTranscript).trim();
+               if (speechResult && speechResult !== lastTranscript) {
+                   document.getElementById('speech-result').value = speechResult;
+                   document.getElementById('voice-status').textContent = 'I heard: ' + speechResult;
+                   lastTranscript = speechResult;
+
+                   // Set a timer to submit after silence
+                   silenceTimer = setTimeout(() => {
+                       if (speechResult) {
+                           document.getElementById('submit-speech').click();
+                           lastTranscript = '';
+                           document.getElementById('speech-result').value = '';
+                       }
+                   }, SILENCE_THRESHOLD);
+               }
            };
 
            recognition.onerror = function(event) {
                console.error('Speech recognition error:', event.error);
+               if (event.error === 'no-speech') {
+                   // Just restart listening if there was no speech detected
+                   restartRecognition();
+               } else {
+                   isListening = false;
+                   document.getElementById('voice-indicator').classList.remove('active');
+                   document.getElementById('voice-status').textContent = 'Voice recognition paused. Click to restart.';
+               }
            };
 
            recognition.onend = function() {
+               // Auto restart if it ends unintentionally
+               if (isListening) {
+                   restartRecognition();
+               } else {
+                   document.getElementById('voice-indicator').classList.remove('active');
+                   document.getElementById('voice-status').textContent = 'Voice recognition disabled.';
+               }
            };
 
            return true;
         } catch (error) {
            console.error('Speech recognition not supported:', error);
+           document.getElementById('voice-status').textContent = 'Voice recognition not supported in this browser.';
            return false;
         }
     }
 
+    function toggleVoiceRecognition() {
        if (!recognition) {
            if (!setupSpeechRecognition()) {
               return;
            }
        }
@@ -182,22 +246,31 @@ st.markdown("""
        if (isListening) {
            recognition.stop();
            isListening = false;
+           document.getElementById('voice-indicator').classList.remove('active');
+           document.getElementById('voice-status').textContent = 'Voice recognition paused. Click to restart.';
        } else {
+           startRecognition();
        }
     }
 
+    function startRecognition() {
+        try {
+            recognition.start();
+            document.getElementById('voice-status').textContent = 'Listening...';
+        } catch (e) {
+            console.error('Error starting recognition:', e);
+            setTimeout(startRecognition, 200);
         }
     }
 
+    function restartRecognition() {
+        try {
+            recognition.stop();
+        } catch (e) {}
+
+        setTimeout(startRecognition, 200);
+    }
+
     // Text-to-Speech functionality
     function speakText(text) {
         const utterance = new SpeechSynthesisUtterance(text);
@@ -207,88 +280,71 @@ st.markdown("""
         window.speechSynthesis.speak(utterance);
     }
 
+    // Auto speak the latest response
+    function autoSpeakLatest() {
+        const messages = document.querySelectorAll('.assistant-msg');
+        if (messages.length > 0) {
+            const latestMessage = messages[messages.length - 1];
+            const messageId = latestMessage.querySelector('span[id^="msg-"]').id;
+            speakText(document.getElementById(messageId).textContent);
+        }
+    }
+
     // Initialize after the page loads
     document.addEventListener('DOMContentLoaded', function() {
         setupSpeechRecognition();
+        // Start listening automatically
+        setTimeout(startRecognition, 1000);
     });
+
+    // Handle speech input submission
+    document.getElementById('submit-speech').addEventListener('click', function() {
+        const speechResult = document.getElementById('speech-result').value;
+        if (speechResult) {
+            // Update the Streamlit text input with the speech result
+            const textInputs = document.querySelectorAll('input[type="text"]');
+            if (textInputs.length > 0) {
+                const lastInput = textInputs[0];
+                lastInput.value = speechResult;
+                lastInput.dispatchEvent(new Event('input', { bubbles: true }));
+
+                // Find and click the submit button
+                setTimeout(() => {
+                    const buttons = document.querySelectorAll('button[kind="primaryForm"]');
+                    for (const button of buttons) {
+                        if (button.textContent.includes('Submit')) {
+                            button.click();
+                            break;
+                        }
                     }
+                }, 100);
+            }
+        }
+    });
+
+    // Auto-speak for newest message if enabled
+    function checkForNewMessages() {
+        const autoSpeakEnabled = document.querySelector('input[type="checkbox"][aria-label="Auto-speak responses"]').checked;
+        if (autoSpeakEnabled) {
+            const messages = document.querySelectorAll('.assistant-msg');
+            if (messages.length > 0) {
+                const latestMessage = messages[messages.length - 1];
+                const messageId = latestMessage.querySelector('span[id^="msg-"]').id;
+
+                // Only speak if this is a new message
+                if (!latestMessage.hasAttribute('data-spoken')) {
+                    speakText(document.getElementById(messageId).textContent);
+                    latestMessage.setAttribute('data-spoken', 'true');
                }
+            }
         }
     }
+
+    // Check for new messages every second
+    setInterval(checkForNewMessages, 1000);
+    </script>
+    """, unsafe_allow_html=True)
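checkForNewMessages() reads a checkbox selected by the aria-label "Auto-speak responses", which implies a matching Streamlit widget elsewhere in app.py (the diff is truncated below, so that widget is not visible here). A plausible sketch of that widget, assuming st.checkbox is wired to the auto_speak key initialized at the top of the file:

```python
# Hypothetical placement -- the actual widget lives below the point where this diff is cut off.
st.session_state.auto_speak = st.checkbox(
    "Auto-speak responses",             # the in-page JS assumes this label is exposed as the aria-label
    value=st.session_state.auto_speak,  # seeded to True during session-state initialization
)
```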
+
+# Add voice status indicator
+st.markdown("""
+    <div class="status