mfidabel committed on
Commit 94261e3 · verified · 1 Parent(s): 60fe074

Update app.py

Files changed (1)
  app.py +2 -3
app.py CHANGED
@@ -1,7 +1,7 @@
+import spaces
 import gradio as gr
 import numpy as np
 import torch
-import spaces
 from peft import PeftModel, PeftConfig
 from transformers import WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, AutomaticSpeechRecognitionPipeline
 
@@ -28,8 +28,7 @@ def transcribe(audio):
     sr, y = audio
     y = y.astype(np.float32)
     y /= np.max(np.abs(y))
-    with torch.cuda.amp.autocast():
-        return pipeline({"sampling_rate": sr, "raw": y}, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
+    return pipeline({"sampling_rate": sr, "raw": y}, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
 
 examples = [
     "./examples/audio_1.mp3",