sdafd committed on
Commit
77c2d4d
·
verified ·
1 Parent(s): cfd471e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -17,10 +17,10 @@ torch.set_num_threads(os.cpu_count())
17
 
18
  # Pre-load models
19
  models = {
20
- "tiny": whisperx.load_model("tiny", device, compute_type=compute_type),
21
- "base": whisperx.load_model("base", device, compute_type=compute_type),
22
- "small": whisperx.load_model("small", device, compute_type=compute_type),
23
- "large": whisperx.load_model("large", device, compute_type=compute_type),
24
  }
25
 
26
  def transcribe(audio_file, model_size="base", debug=False):
@@ -35,7 +35,7 @@ def transcribe(audio_file, model_size="base", debug=False):
35
  # Run inference
36
  model = models[model_size]
37
  batch_size = 8 if model_size == "tiny" else 4
38
- transcript = model.transcribe(audio, batch_size=batch_size, vad_method='silero')
39
 
40
  # Align whisper output
41
  model_a, metadata = whisperx.load_align_model(
 
17
 
18
  # Pre-load models
19
  models = {
20
+ "tiny": whisperx.load_model("tiny", device, compute_type=compute_type, vad_method='silero'),
21
+ "base": whisperx.load_model("base", device, compute_type=compute_type, vad_method='silero'),
22
+ "small": whisperx.load_model("small", device, compute_type=compute_type, vad_method='silero'),
23
+ "large": whisperx.load_model("large", device, compute_type=compute_type, vad_method='silero'),
24
  }
25
 
26
  def transcribe(audio_file, model_size="base", debug=False):
 
35
  # Run inference
36
  model = models[model_size]
37
  batch_size = 8 if model_size == "tiny" else 4
38
+ transcript = model.transcribe(audio, batch_size=batch_size)
39
 
40
  # Align whisper output
41
  model_a, metadata = whisperx.load_align_model(