Create transcrriber.py
Browse files- transcrriber.py +41 -0
transcrriber.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import speech_recognition as sr
|
2 |
+
import numpy as np
|
3 |
+
import collections
|
4 |
+
import config
|
5 |
+
|
6 |
+
class SpeechTranscriber:
    """Accumulate raw audio samples and transcribe them with the Google
    Web Speech API (via the ``speech_recognition`` package).

    Samples are pushed in with :meth:`add_audio_chunk`; once at least
    ``config.MIN_PROCESSING_DURATION`` seconds of audio have accumulated,
    :meth:`get_transcript_chunk` packs the buffer as 16-bit PCM and asks
    the recognizer for text, clearing the buffer afterwards.
    """

    def __init__(self):
        self.recognizer = sr.Recognizer()
        # Recognizer tuning comes from the project-wide config module.
        self.recognizer.energy_threshold = config.ENERGY_THRESHOLD
        self.recognizer.dynamic_energy_threshold = config.DYNAMIC_ENERGY_THRESHOLD
        self.recognizer.pause_threshold = config.PAUSE_THRESHOLD
        # The buffer holds individual samples, so its capacity must be
        # expressed in samples (seconds * samples-per-second).  The
        # original ``BUFFER_DURATION * 10`` was not scaled by the sample
        # rate, so the buffer could never reach the
        # ``SAMPLE_RATE * MIN_PROCESSING_DURATION`` minimum checked in
        # get_transcript_chunk() and transcription never triggered.
        self.audio_buffer = collections.deque(
            maxlen=config.SAMPLE_RATE * config.BUFFER_DURATION
        )
        # NOTE(review): unused in this class — presumably read elsewhere;
        # kept for compatibility.
        self.last_processed = 0

    def add_audio_chunk(self, audio_chunk):
        """Append an iterable of raw audio samples to the rolling buffer.

        Oldest samples fall off automatically once the deque's maxlen
        is reached.
        """
        self.audio_buffer.extend(audio_chunk)

    def get_transcript_chunk(self):
        """Transcribe the buffered audio and clear the buffer.

        Returns:
            The recognized text, or ``None`` when there is not yet
            enough audio, the speech was unintelligible, or the API
            request failed.
        """
        # Keep accumulating until we have MIN_PROCESSING_DURATION
        # seconds of samples.
        if len(self.audio_buffer) < config.SAMPLE_RATE * config.MIN_PROCESSING_DURATION:
            return None

        # Pack samples as 16-bit PCM.  The explicit int16 dtype matters:
        # the declared sample width below is 2 bytes, and numpy's default
        # dtype (int64/float64) would emit a byte stream the recognizer
        # misinterprets.
        audio_data = sr.AudioData(
            np.array(self.audio_buffer, dtype=np.int16).tobytes(),
            config.SAMPLE_RATE,
            2,  # sample width in bytes (16-bit PCM)
        )

        try:
            # Google Web Speech API: best accuracy of the free backends,
            # but requires network access.
            text = self.recognizer.recognize_google(audio_data)
            return text
        except sr.UnknownValueError:
            # Unintelligible speech is expected — not worth reporting.
            return None
        except sr.RequestError as e:
            print(f"Speech recognition error: {str(e)}")
            return None
        finally:
            # Drop the processed samples so the next call starts fresh.
            self.audio_buffer.clear()