Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -12,6 +12,7 @@ if DEVICE == "auto":
|
|
12 |
print(f"[SYSTEM] | Using {DEVICE} type compute device.")
|
13 |
|
14 |
# Variables
|
|
|
15 |
BATCH_SIZE = 8
|
16 |
|
17 |
repo = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3-turbo", chunk_length_s=30, device=DEVICE)
|
@@ -25,7 +26,7 @@ footer {
|
|
25 |
'''
|
26 |
|
27 |
@spaces.GPU(duration=15)
|
28 |
-
def transcribe(inputs, task
|
29 |
if inputs is None: raise gr.Error("Invalid input.")
|
30 |
output = repo(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
|
31 |
return output
|
@@ -40,7 +41,7 @@ with gr.Blocks(css=css) as main:
|
|
40 |
|
41 |
with gr.Column():
|
42 |
input = gr.Audio(sources="upload", type="filepath", label="Input"),
|
43 |
-
task = gr.Radio(["transcribe", "translate"], label="Task", value=
|
44 |
submit = gr.Button("▶")
|
45 |
maintain = gr.Button("☁️")
|
46 |
|
|
|
12 |
print(f"[SYSTEM] | Using {DEVICE} type compute device.")
|
13 |
|
14 |
# Variables
|
15 |
+
DEFAULT_TASK = "transcribe"
|
16 |
BATCH_SIZE = 8
|
17 |
|
18 |
repo = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3-turbo", chunk_length_s=30, device=DEVICE)
|
|
|
26 |
'''
|
27 |
|
28 |
@spaces.GPU(duration=15)
def transcribe(inputs=None, task=DEFAULT_TASK):
    """Run speech recognition on an uploaded audio file.

    Parameters:
        inputs: filepath of the audio to process (from gr.Audio); None when
            the user submitted without uploading anything.
        task: Whisper task, "transcribe" or "translate" — forwarded to the
            model via generate_kwargs. Defaults to DEFAULT_TASK.

    Returns:
        The recognized/translated text as a string.

    Raises:
        gr.Error: when no input audio was provided.
    """
    # FIX: original line read `def transcribe(inputs=None, task=DEFAULT_TASK:`
    # — the missing closing parenthesis is a SyntaxError that prevents the
    # whole app from importing.
    if inputs is None:
        raise gr.Error("Invalid input.")
    # return_timestamps=True lets the chunked pipeline (chunk_length_s=30)
    # stitch long audio; only the joined "text" field is returned to the UI.
    output = repo(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
    return output
|
|
|
41 |
|
42 |
with gr.Column():
    # FIX: the original lines ended `gr.Audio(...),` and `gr.Radio(...),` —
    # the trailing commas bind `input` and `task` to 1-tuples
    # `(Component,)` rather than the components themselves, which breaks
    # event wiring such as `inputs=[input, task]` later in the script.
    input = gr.Audio(sources="upload", type="filepath", label="Input")
    task = gr.Radio(["transcribe", "translate"], label="Task", value=DEFAULT_TASK)
    submit = gr.Button("▶")
    maintain = gr.Button("☁️")
|
47 |
|