Create app.py
app.py
ADDED
@@ -0,0 +1,50 @@
+import streamlit as st
+from pocketsphinx import LiveSpeech, get_model_path
+import os
+import sounddevice as sd
+import numpy as np
+import scipy.io.wavfile as wav
+
+# Function to capture audio from the mic
+def record_audio(filename='temp.wav', duration=5, fs=16000):
+    st.text("Recording Audio...")
+    with st.spinner(f'Recording for {duration} seconds...'):
+        myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=1)
+        sd.wait()  # Waits until recording is finished
+        wav.write(filename, fs, myrecording)  # Save as WAV file
+
+# Get the model path for pocketsphinx
+model_path = get_model_path()
+config = {
+    'verbose': False,
+    'hmm': os.path.join(model_path, 'en-us'),
+    'lm': os.path.join(model_path, 'en-us.lm.bin'),
+    'dict': os.path.join(model_path, 'cmudict-en-us.dict')
+}
+
+# Streamlit UI
+st.title("Simple Speech Recognition with Streamlit and PocketSphinx")
+button = st.button("Press to Speak")
+
+# Store the state of the recording
+if 'recording_done' not in st.session_state:
+    st.session_state.recording_done = False
+
+# When the button is pressed
+if button:
+    # Record the audio
+    record_audio()
+    st.session_state.recording_done = True
+
+# If an audio was recorded, process it with PocketSphinx
+if st.session_state.recording_done:
+    audio = LiveSpeech(**config)
+
+    st.text("Processing Audio...")
+    with st.spinner('Recognizing...'):
+        for phrase in audio:
+            st.write(phrase)
+            break  # We'll stop after the first phrase
+
+    # Reset the state
+    st.session_state.recording_done = False
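
Note that LiveSpeech(**config) opens the microphone and listens again, so the temp.wav written by record_audio() is never actually decoded. A minimal sketch of decoding the saved recording instead, assuming the pocketsphinx 0.1.x AudioFile helper and a 16 kHz, 16-bit mono WAV (the audio_file key is the only addition to the existing config; dtype='int16' keeps the file in the PCM format PocketSphinx expects):

import os
import sounddevice as sd
import scipy.io.wavfile as wav
from pocketsphinx import AudioFile, get_model_path  # AudioFile assumed from pocketsphinx 0.1.x

fs = 16000     # sample rate the bundled en-us model expects
duration = 5   # seconds, mirroring record_audio()'s default

# Record 16-bit mono audio and save it, as record_audio() does
recording = sd.rec(int(duration * fs), samplerate=fs, channels=1, dtype='int16')
sd.wait()
wav.write('temp.wav', fs, recording)

# Same model paths as app.py, plus the file to decode instead of the mic
model_path = get_model_path()
config = {
    'verbose': False,
    'audio_file': 'temp.wav',
    'hmm': os.path.join(model_path, 'en-us'),
    'lm': os.path.join(model_path, 'en-us.lm.bin'),
    'dict': os.path.join(model_path, 'cmudict-en-us.dict'),
}

# AudioFile yields one hypothesis per detected utterance in the file
for phrase in AudioFile(**config):
    print(phrase)
    break  # keep only the first phrase, as app.py does

Both sounddevice and LiveSpeech need a local audio input device, so the quickest way to try either flow is streamlit run app.py on a machine with a microphone.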