Merge branch 'main' of github.com:promet99/whisper_streaming into promet99-main
whisper_online.py  (+4 -4)
@@ -192,17 +192,17 @@ class OpenaiApiASR(ASRBase):
 
         o = []
         for word in segments.words:
-            start = word.get("start")
-            end = word.get("end")
+            start = word.start
+            end = word.end
             if any(s[0] <= start <= s[1] for s in no_speech_segments):
                 # print("Skipping word", word.get("word"), "because it's in a no-speech segment")
                 continue
-            o.append((start, end, word.get("word")))
+            o.append((start, end, word.word))
         return o
 
 
     def segments_end_ts(self, res):
-        return [s["end"] for s in res.words]
+        return [s.end for s in res.words]
 
     def transcribe(self, audio_data, prompt=None, *args, **kwargs):
         # Write the audio data to a buffer
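The four changed lines all swap dict-style access for attribute access: with the v1 openai Python SDK, the verbose transcription response exposes word timestamps as objects with .start, .end, and .word attributes rather than dictionaries, so .get("start") and s["end"] no longer apply. Below is a minimal sketch of the kind of API call whose response OpenaiApiASR consumes, assuming the openai v1 SDK; the file name, model choice, and client setup are illustrative and not taken from the diff.

# Minimal sketch (not part of the diff): word-level timestamps from the
# OpenAI transcription API using the v1 Python SDK, which returns objects
# with attributes rather than dicts -- hence word.start / word.end / word.word.
# The audio path and model name here are illustrative assumptions.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

with open("audio.wav", "rb") as f:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=f,
        response_format="verbose_json",
        timestamp_granularities=["word"],
    )

# Each entry in transcript.words is an object, not a dict:
for w in transcript.words:
    print(w.start, w.end, w.word)  # attribute access, no .get()

This is why ts_words and segments_end_ts read word.start, word.end, word.word and s.end directly: attribute access is the only form these response objects support.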