Update soni_translate/speech_segmentation.py
soni_translate/speech_segmentation.py CHANGED

@@ -27,7 +27,7 @@ def random_sleep():
     time.sleep(sleep_time)


-@spaces.GPU(duration=
+@spaces.GPU(duration=110)
 def load_and_transcribe_audio(asr_model, audio, compute_type, language, asr_options, batch_size, segment_duration_limit):
     # Load model
     model = whisperx.load_model(
@@ -81,7 +81,7 @@ def load_align_and_align_segments(result, audio, DAMHF):

     return alignment_result

-@spaces.GPU(duration=
+@spaces.GPU(duration=110)
 def diarize_audio(diarize_model, audio_wav, min_speakers, max_speakers):

     if os.environ.get("ZERO_GPU") == "TRUE":
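For context, `spaces.GPU` is the Hugging Face ZeroGPU decorator, and `duration` caps how many seconds of GPU time a single call may hold before the GPU is released back to the pool. Below is a minimal sketch of the pattern applied in this commit; the function name and body are illustrative and not taken from this repository:

import spaces  # Hugging Face ZeroGPU helper, available inside a Space
import torch

@spaces.GPU(duration=110)  # request a GPU for at most ~110 seconds per call
def transcribe(audio_path):
    # Illustrative body: GPU-bound work runs while the decorator holds the allocation
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Running transcription of {audio_path} on {device}")
    return device

Raising or lowering `duration` trades queue latency against the risk of the call being cut off; 110 seconds here presumably leaves headroom under a 120-second allocation window, though that limit is an assumption rather than something stated in the diff.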