Update app.py
app.py
CHANGED
@@ -70,26 +70,24 @@ def convert_to_wav(filepath):
 pipe = pipeline("automatic-speech-recognition", model="NbAiLab/nb-whisper-large", chunk_length_s=30, generate_kwargs={'task': 'transcribe', 'language': 'no'})
 
 @spaces.GPU()
-def transcribe_audio(audio_file, batch_size=16, sample_rate=16000):
-
-
-
-
-
-
-
-    waveform
-
-
-
+def transcribe_audio(audio_file, filepath, batch_size=16, sample_rate=16000):
+
+    #waveform, sample_rate = torchaudio.load(str(audio_file) #3
+    audio_file = filepath(audio_file)
+    waveform, sample_rate = torchaudio.load(audio_file) #1
+    #waveform, sample_rate = torchaudio.load("{filepath}") #2
+
+    # --convert to mono
+    if waveform.ndim > 1:
+        waveform = waveform[0, :]
+
+    waveform = waveform.numpy()
+
+    start_time = time.time()
 
-    if samples.ndim > 1:
-        samples = samples[0, :]
-
-
     # --pipe it
     with torch.no_grad():
-        outputs = pipe(
+        outputs = pipe(waveform, sampling_rate=sample_rate, batch_size=batch_size, return_timestamps=False)
 
     end_time = time.time()
 
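For context, here is a minimal runnable sketch of what the updated transcribe_audio appears to be aiming for. It is not the committed code: it assumes the stray call `audio_file = filepath(audio_file)` (calling a function parameter, a likely cause of the build error) is dropped, omits the `@spaces.GPU()` decorator so it runs outside a Space, passes the raw audio to the pipeline in the documented `{"raw": ..., "sampling_rate": ...}` dict form rather than a `sampling_rate=` keyword, and returns a hypothetical `(text, elapsed)` pair.

import time

import torch
import torchaudio
from transformers import pipeline

# Same pipeline setup as in the hunk above.
pipe = pipeline(
    "automatic-speech-recognition",
    model="NbAiLab/nb-whisper-large",
    chunk_length_s=30,
    generate_kwargs={"task": "transcribe", "language": "no"},
)

def transcribe_audio(audio_file, batch_size=16):
    # Load the audio file; torchaudio.load returns (waveform, sample_rate).
    waveform, sample_rate = torchaudio.load(audio_file)

    # Convert to mono by keeping the first channel, as the diff does.
    if waveform.ndim > 1:
        waveform = waveform[0, :]

    # The pipeline expects a NumPy array for raw audio input.
    waveform = waveform.numpy()

    start_time = time.time()
    # Run the ASR pipeline; the dict input carries the sampling rate alongside the raw array.
    with torch.no_grad():
        outputs = pipe(
            {"raw": waveform, "sampling_rate": sample_rate},
            batch_size=batch_size,
            return_timestamps=False,
        )
    end_time = time.time()

    return outputs["text"], end_time - start_time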