Michael Natanael committed · Commit 3347b23
1 Parent(s): 478e07c
change model to distil-large-v3 with 4 cpu_threads
app.py CHANGED
@@ -51,14 +51,20 @@ model.eval()
 
 # === INITIAL SETUP: Faster Whisper ===
 # https://github.com/SYSTRAN/faster-whisper
-faster_whisper_model_size = "large-v3"
+# faster_whisper_model_size = "large-v3"
+faster_whisper_model_size = "distil-large-v3"
 
 # Run on GPU with FP16
 # model = WhisperModel(model_size, device="cuda", compute_type="float16")
 # or run on GPU with INT8
 # model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
 # or run on CPU with INT8
-faster_whisper_model = WhisperModel(
+faster_whisper_model = WhisperModel(
+    faster_whisper_model_size,
+    device="cpu",
+    compute_type="int8",
+    cpu_threads=4
+)
 
 
 def faster_whisper(temp_audio_path):
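
The commit swaps the full large-v3 checkpoint for distil-large-v3 and pins the CPU backend to int8 quantization with 4 threads, which trades a small amount of accuracy for faster, lighter CPU inference. The app's faster_whisper(temp_audio_path) helper is not shown in this hunk; the sketch below is a hypothetical illustration (not the Space's actual code) of how a model configured this way is typically driven with faster-whisper's transcribe() API, using only calls documented in the SYSTRAN/faster-whisper README linked in the diff.

# Minimal usage sketch, assuming the same settings as this commit.
from faster_whisper import WhisperModel

faster_whisper_model = WhisperModel(
    "distil-large-v3",   # distilled large-v3 weights, as in the diff
    device="cpu",
    compute_type="int8",
    cpu_threads=4,
)

def transcribe_audio(temp_audio_path: str) -> str:
    """Hypothetical helper: transcribe an audio file and return plain text."""
    # transcribe() returns a lazy generator of segments plus metadata about the audio.
    segments, info = faster_whisper_model.transcribe(temp_audio_path, beam_size=5)
    print(f"Detected language: {info.language} (p={info.language_probability:.2f})")
    # Iterating the generator performs the actual decoding.
    return " ".join(segment.text.strip() for segment in segments)

Because transcribe() is lazy, decoding only happens while the segment generator is consumed, so joining the segments (or streaming them one by one) is where the 4 CPU threads and int8 compute type actually take effect.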