Update app.py

app.py CHANGED

@@ -28,6 +28,8 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 # ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+if "listening" not in st.session_state:
+    st.session_state.listening = False
 
 # ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
 def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
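
Within the hunks shown here, the new listening flag is only ever initialized; the speech-recognition JavaScript added further down keeps its state in its own isListening variable rather than reading the flag back from Python. A minimal sketch of one way the flag could be driven from the Python side, assuming a plain Streamlit button with a callback (the button labels and the _toggle_listening helper are illustrative, not part of this commit):

import streamlit as st

if "listening" not in st.session_state:
    st.session_state.listening = False

def _toggle_listening() -> None:
    # Flip the flag; Streamlit reruns the script after the callback completes.
    st.session_state.listening = not st.session_state.listening

st.button(
    "🛑 Stop Listening" if st.session_state.listening else "🎤 Start Voice Input",
    on_click=_toggle_listening,
)
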
@@ -89,7 +91,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
 # ✅ Streamlit UI
 st.title("🚀 HAL - NASA AI Assistant")
 
-# ✅
+# ✅ Add styles and speech recognition JavaScript
 st.markdown("""
 <style>
 .user-msg, .assistant-msg {

@@ -103,26 +105,187 @@ st.markdown("""
 .user-msg { background-color: #696969; color: white; }
 .assistant-msg { background-color: #333333; color: white; }
 .container { display: flex; flex-direction: column; align-items: flex-start; }
+.speech-button {
+    background-color: #4CAF50;
+    border: none;
+    color: white;
+    padding: 10px 15px;
+    text-align: center;
+    text-decoration: none;
+    display: inline-block;
+    font-size: 16px;
+    margin: 4px 2px;
+    cursor: pointer;
+    border-radius: 12px;
+}
+.speak-button {
+    background-color: #2196F3;
+    border: none;
+    color: white;
+    padding: 5px 10px;
+    text-align: center;
+    text-decoration: none;
+    display: inline-block;
+    font-size: 12px;
+    margin: 2px 2px;
+    cursor: pointer;
+    border-radius: 12px;
+}
 @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
 </style>
+
+<script>
+// Speech Recognition Setup
+let recognition;
+let isListening = false;
+
+function setupSpeechRecognition() {
+    try {
+        window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+        recognition = new SpeechRecognition();
+        recognition.lang = 'en-US';
+        recognition.interimResults = false;
+        recognition.maxAlternatives = 1;
+
+        recognition.onresult = function(event) {
+            const speechResult = event.results[0][0].transcript;
+            document.getElementById('speech-result').value = speechResult;
+            document.getElementById('submit-speech').click();
+        };
+
+        recognition.onerror = function(event) {
+            console.error('Speech recognition error:', event.error);
+            isListening = false;
+            updateMicButton();
+        };
+
+        recognition.onend = function() {
+            isListening = false;
+            updateMicButton();
+        };
+
+        return true;
+    } catch (error) {
+        console.error('Speech recognition not supported:', error);
+        return false;
+    }
+}
+
+function toggleSpeechRecognition() {
+    if (!recognition) {
+        if (!setupSpeechRecognition()) {
+            alert('Speech recognition is not supported in your browser.');
+            return;
+        }
+    }
+
+    if (isListening) {
+        recognition.stop();
+        isListening = false;
+    } else {
+        recognition.start();
+        isListening = true;
+    }
+
+    updateMicButton();
+}
+
+function updateMicButton() {
+    const micButton = document.getElementById('mic-button');
+    if (micButton) {
+        micButton.textContent = isListening ? '🛑 Stop Listening' : '🎤 Start Voice Input';
+        micButton.style.backgroundColor = isListening ? '#f44336' : '#4CAF50';
+    }
+}
+
+// Text-to-Speech functionality
+function speakText(text) {
+    const utterance = new SpeechSynthesisUtterance(text);
+    utterance.lang = 'en-US';
+    utterance.pitch = 1;
+    utterance.rate = 1;
+    window.speechSynthesis.speak(utterance);
+}
+
+// Initialize after the page loads
+document.addEventListener('DOMContentLoaded', function() {
+    setupSpeechRecognition();
+});
+</script>
 """, unsafe_allow_html=True)
 
-#
-
+# Add voice control components
+col1, col2 = st.columns([4, 1])
+with col1:
+    user_input = st.chat_input("Type your message here...")
+
+with col2:
+    st.markdown("""
+    <button id="mic-button" onclick="toggleSpeechRecognition()" class="speech-button">
+        🎤 Start Voice Input
+    </button>
+    <input type="hidden" id="speech-result">
+    <button id="submit-speech" style="display:none;"></button>
+    """, unsafe_allow_html=True)
+
+# Handle form for speech input (hidden)
+speech_result = st.text_input("Speech Result", key="speech_input", label_visibility="collapsed")
+if speech_result:
+    user_input = speech_result
+    # Reset the speech input
+    st.session_state.speech_input = ""
 
 if user_input:
     # Get response and update chat history
     response, st.session_state.chat_history = get_response(
-        system_message="You are a helpful AI assistant.",
+        system_message="You are a helpful AI assistant specializing in NASA and space information.",
        user_text=user_input,
         chat_history=st.session_state.chat_history
     )
 
-# ✅ Display chat history
+# ✅ Display chat history with speak buttons
 st.markdown("<div class='container'>", unsafe_allow_html=True)
-for message in st.session_state.chat_history:
+for i, message in enumerate(st.session_state.chat_history):
     if message["role"] == "user":
         st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
     else:
-
-
+        speak_button = f"""
+        <button onclick="speakText(`{message['content'].replace('`', '\'').replace('"', '\'')}`)" class="speak-button">
+            🔊 Speak
+        </button>
+        """
+        st.markdown(
+            f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']} {speak_button}</div>",
+            unsafe_allow_html=True
+        )
+st.markdown("</div>", unsafe_allow_html=True)
+
+# Add JavaScript event listener for the submit button
+components_js = """
+<script>
+document.getElementById('submit-speech').addEventListener('click', function() {
+    const speechResult = document.getElementById('speech-result').value;
+    if (speechResult) {
+        // Update the Streamlit text input with the speech result
+        const textInputs = document.querySelectorAll('input[type="text"]');
+        if (textInputs.length > 0) {
+            const lastInput = textInputs[0];
+            lastInput.value = speechResult;
+            lastInput.dispatchEvent(new Event('input', { bubbles: true }));
+
+            // Find and click the submit button
+            setTimeout(() => {
+                const buttons = document.querySelectorAll('button[kind="primaryForm"]');
+                for (const button of buttons) {
+                    if (button.textContent.includes('Submit')) {
+                        button.click();
+                        break;
+                    }
+                }
+            }, 100);
+        }
+    }
+});
+</script>
+"""
+st.components.v1.html(components_js, height=0)
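
Two behaviors of current Streamlit releases matter for the pattern this commit uses: script tags passed through st.markdown(..., unsafe_allow_html=True) are generally not executed, and HTML rendered with st.components.v1.html runs in a sandboxed iframe, so a script there cannot reach the parent page's #speech-result input or the app's own text inputs. Assigning st.session_state.speech_input = "" after the text_input with that key has been created in the same run also typically raises a StreamlitAPIException. A minimal, self-contained sketch under those assumptions, keeping the mic button and the Web Speech API script together in one components.html iframe (the element ids and the toggle helper are illustrative; relaying the transcript back into Python would still need a custom Streamlit component):

import streamlit.components.v1 as components

# Mic button and Web Speech API script live in the same iframe, so the script
# actually executes; the transcript is only displayed inside that iframe.
components.html(
    """
    <button id="mic-button" onclick="toggle()">🎤 Start Voice Input</button>
    <span id="transcript"></span>
    <script>
      let recognition = null, listening = false;
      function toggle() {
        const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
        if (!SR) { alert('Speech recognition is not supported in this browser.'); return; }
        if (!recognition) {
          recognition = new SR();
          recognition.lang = 'en-US';
          recognition.onresult = (e) => {
            document.getElementById('transcript').textContent = e.results[0][0].transcript;
          };
          recognition.onend = () => {
            listening = false;
            document.getElementById('mic-button').textContent = '🎤 Start Voice Input';
          };
        }
        listening ? recognition.stop() : recognition.start();
        listening = !listening;
        document.getElementById('mic-button').textContent =
          listening ? '🛑 Stop Listening' : '🎤 Start Voice Input';
      }
    </script>
    """,
    height=80,
)

The same iframe could also host the text-to-speech call, since window.speechSynthesis should be available there as well.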