Naz786 committed on
Commit 5dca394 · verified · 1 Parent(s): 23910c3

Update app.py

Files changed (1)
  1. app.py +82 -4
app.py CHANGED
@@ -158,9 +158,87 @@ elif page == "Semantic Search":
         user_role = st.selectbox("Your Role", USER_ROLES, key="sem_role")
     with col4:
         explanation_language = st.selectbox("Explanation Language", EXPLANATION_LANGUAGES, key="sem_expl")
-    question = st.text_input("Ask a question about your code")
-    st.caption("Example questions:")
-    st.write(", ".join(EXAMPLE_QUESTIONS))
+    # Initialize session state variables for voice input and auto run
+    if "voice_question" not in st.session_state:
+        st.session_state.voice_question = ""
+    if "auto_run_search" not in st.session_state:
+        st.session_state.auto_run_search = False
+
+    # Container for question input and voice button
+    col_question, col_voice = st.columns([8,1])
+    with col_question:
+        question = st.text_input("Ask a question about your code", value=st.session_state.voice_question, key="question_input")
+    with col_voice:
+        # Microphone button with custom HTML and JS for voice input
+        st.markdown(
+            """
+            <button id="mic-btn" title="Click to speak" style="height:38px; width:38px; font-size:20px;">🎤</button>
+            <script>
+            const micBtn = window.parent.document.querySelector('#mic-btn');
+            const streamlitDoc = window.parent.document;
+
+            // Use Web Speech API for voice recognition
+            const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+            if (SpeechRecognition) {
+                const recognition = new SpeechRecognition();
+                recognition.lang = 'en-US';
+                recognition.interimResults = false;
+                recognition.maxAlternatives = 1;
+
+                micBtn.onclick = () => {
+                    recognition.start();
+                    micBtn.textContent = '🎙️';
+                };
+
+                recognition.onresult = (event) => {
+                    const transcript = event.results[0][0].transcript;
+                    // Send transcript to Streamlit via custom event
+                    const inputEvent = new CustomEvent("voiceInput", {detail: transcript});
+                    streamlitDoc.dispatchEvent(inputEvent);
+                    micBtn.textContent = '🎤';
+                };
+
+                recognition.onerror = (event) => {
+                    console.error('Speech recognition error', event.error);
+                    micBtn.textContent = '🎤';
+                };
+            } else {
+                micBtn.disabled = true;
+                micBtn.title = "Speech Recognition not supported in this browser.";
+            }
+            </script>
+            """,
+            unsafe_allow_html=True
+        )
+
+    # Listen for the custom event and update session state via Streamlit's experimental_rerun hack
+    # This requires a small hack using st.experimental_get_query_params and st.experimental_set_query_params
+    # We will use st.experimental_get_query_params to detect voice input from URL params
+
+    # Check if voice input is passed via query params
+    query_params = st.experimental_get_query_params()
+    if "voice_input" in query_params:
+        voice_text = query_params["voice_input"][0]
+        if voice_text != st.session_state.voice_question:
+            st.session_state.voice_question = voice_text
+            st.session_state.auto_run_search = True
+            # Clear the query param to avoid repeated triggers
+            st.experimental_set_query_params()
+
+    # Run semantic search automatically if flag is set
+    if st.session_state.auto_run_search:
+        st.session_state.auto_run_search = False
+        if not code_input.strip() or not st.session_state.voice_question.strip():
+            st.error("Both code and question are required.")
+        elif not code_matches_language(code_input, programming_language):
+            st.error(f"Language mismatch. Please check your code and language selection.")
+        else:
+            with st.spinner("Running Semantic Search..."):
+                answer = call_groq_api(f"{st.session_state.voice_question}\n\nCode:\n{code_input}")
+            st.success("Answer:")
+            st.write(answer)
+
+    # Also keep the manual button for fallback
     if st.button("Run Semantic Search"):
         if not code_input.strip() or not question.strip():
             st.error("Both code and question are required.")
@@ -170,4 +248,4 @@ elif page == "Semantic Search":
             with st.spinner("Running Semantic Search..."):
                 answer = call_groq_api(f"{question}\n\nCode:\n{code_input}")
             st.success("Answer:")
-            st.write(answer)
+            st.write(answer)
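One gap worth flagging in the new code: the injected <script> dispatches a "voiceInput" CustomEvent, but nothing in the commit ever writes the "voice_input" query parameter that the Python side polls, so the auto-run path has no way to fire from the mic button. Below is a minimal sketch, not part of this commit, of one way to close that gap: render the button with streamlit.components.v1.html (whose script runs inside a component iframe) and have the recognition callback write the transcript into the parent URL. The voice_search_widget helper and the standalone layout are illustrative assumptions, not the author's code.

# Hypothetical sketch, not from the commit: wire the Web Speech API transcript
# into the ?voice_input=... query parameter that app.py already checks.
import streamlit as st
import streamlit.components.v1 as components

def voice_search_widget():
    # Render the mic button inside a component iframe so the <script> actually executes.
    components.html(
        """
        <button id="mic-btn" title="Click to speak" style="height:38px; width:38px; font-size:20px;">🎤</button>
        <script>
        const micBtn = document.getElementById("mic-btn");
        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
        if (SpeechRecognition) {
            const recognition = new SpeechRecognition();
            recognition.lang = "en-US";
            recognition.interimResults = false;
            micBtn.onclick = () => recognition.start();
            recognition.onresult = (event) => {
                const transcript = event.results[0][0].transcript;
                // Reload the parent page with the transcript in the query string,
                // where st.experimental_get_query_params() can read it on the next run.
                window.parent.location.search = "?voice_input=" + encodeURIComponent(transcript);
            };
        } else {
            micBtn.disabled = true;
            micBtn.title = "Speech Recognition not supported in this browser.";
        }
        </script>
        """,
        height=60,
    )

voice_search_widget()
params = st.experimental_get_query_params()
question = params.get("voice_input", [""])[0]
if question:
    st.write("Heard:", question)

Caveats: assigning window.parent.location.search reloads the page, which starts a fresh Streamlit session (so st.session_state, including any pasted code, is lost), and newer Streamlit releases deprecate st.experimental_get_query_params / st.experimental_set_query_params in favor of st.query_params.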