Athspi committed
Commit c679791 · verified · 1 Parent(s): 1ba1d48

Update app.py

Files changed (1)
  1. app.py  +5 -4
app.py CHANGED
@@ -132,9 +132,6 @@ LANGUAGE_NAME_TO_CODE = {
 # Reverse mapping of language codes to full language names
 CODE_TO_LANGUAGE_NAME = {v: k for k, v in LANGUAGE_NAME_TO_CODE.items()}
 
-# Device and compute type for faster-whisper
-device, torch_dtype = ("cuda", "float32") if torch.cuda.is_available() else ("cpu", "int8")
-
 def detect_language(audio_file):
     """Detect the language of the audio file."""
     # Load the Whisper model (use "base" for faster detection)
@@ -186,8 +183,12 @@ def transcribe_audio(audio_file, language="Auto Detect", model_size="Base (Faste
     else:
         # Use the selected Whisper model
         if model_size == "Systran Faster Whisper Large v3":
+            # Define device and compute type for faster-whisper
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            compute_type = "float32" if device == "cuda" else "int8"
+
             # Use faster-whisper for the Systran model
-            model = WhisperModel(MODELS[model_size], device=device, compute_type=torch_dtype)
+            model = WhisperModel(MODELS[model_size], device=device, compute_type=compute_type)
             segments, info = model.transcribe(
                 processed_audio_path,
                 task="transcribe",
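For reference, a minimal standalone sketch of the pattern the new lines use, written against the public faster-whisper API. The model path "Systran/faster-whisper-large-v3" and the file name "audio.wav" are placeholders standing in for MODELS[model_size] and processed_audio_path, which are defined elsewhere in app.py and not shown in this diff:

import torch
from faster_whisper import WhisperModel

# Pick the GPU when available; fall back to int8 on CPU to keep memory use low.
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float32" if device == "cuda" else "int8"

# Placeholder for MODELS[model_size] from app.py.
model = WhisperModel("Systran/faster-whisper-large-v3",
                     device=device, compute_type=compute_type)

# Placeholder for processed_audio_path from app.py.
segments, info = model.transcribe("audio.wav", task="transcribe")
print(info.language, info.language_probability)
for segment in segments:
    print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")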