File size: 1,625 Bytes
a3c1698
 
4946e6a
08e100b
 
 
 
0bc0be3
3ef3d4d
 
08e100b
0bc0be3
4bdff08
08e100b
 
3ef3d4d
475d49e
8b2a016
 
 
 
 
 
 
 
33aa15e
e21be95
6d31a11
3ef3d4d
 
f9b23de
3ef3d4d
 
 
 
 
 
f9b23de
d0947da
0bc0be3
 
 
 
3ef3d4d
 
 
 
0bc0be3
 
 
 
 
 
45377c6
 
0bc0be3
 
 
 
4ae584a
0bc0be3
a53e625
548b077
0bc0be3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# Imports
import gradio as gr
import spaces
import torch

from transformers import pipeline

# Pre-Initialize
DEVICE = -1  # -1 selects CPU for the transformers pipeline
print("[SYSTEM] | Using CPU type compute device.")

# Variables
DEFAULT_TASK = "transcribe"  # default Whisper task ("transcribe" or "translate")
BATCH_SIZE = 8  # batch size passed to the ASR pipeline on each call

# ASR pipeline.
# FIX: in the original single-line call the closing ")" was trapped behind the
# inline comment ("device=DEVICE  # Ensures CPU usage)"), leaving the call
# unclosed — a SyntaxError. Reformatted so the comment no longer swallows
# the parenthesis; arguments are unchanged.
repo = pipeline(
    task="automatic-speech-recognition",
    model="openai/whisper-large-v3-turbo",
    chunk_length_s=30,  # process audio in 30-second chunks
    device=DEVICE,  # ensures CPU usage
)

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

# Functions
def transcribe(input=None, task=DEFAULT_TASK):
    """Run Whisper ASR on an uploaded audio file.

    Parameters
    ----------
    input : str | None
        Filepath of the audio to process (as produced by ``gr.Audio``).
        ``None`` raises a ``gr.Error``.
    task : str
        Either "transcribe" or "translate"; defaults to ``DEFAULT_TASK``.

    Returns
    -------
    str
        The recognized text.
    """
    print(input)
    if input is None:
        raise gr.Error("Invalid input.")

    result = repo(
        input,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )
    return result["text"]

def cloud():
    """Log a keep-alive message (wired to the ☁️ button)."""
    print("[CLOUD] | Space maintained.")

@spaces.GPU(duration=60)
def gpu():
    """No-op body; presumably the decorator requests a 60s GPU allocation
    on the hosting platform — confirm against the `spaces` package docs."""
    return None

# Initialize: build the Gradio UI and launch it.
with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Transcribe audio to text.")

    with gr.Column():
        # Components are renamed locally; wiring below is unchanged.
        audio_input = gr.Audio(sources="upload", type="filepath", label="Input")
        task_choice = gr.Radio(
            ["transcribe", "translate"], label="Task", value=DEFAULT_TASK
        )
        run_button = gr.Button("▶")
        keepalive_button = gr.Button("☁️")

    with gr.Column():
        text_output = gr.Textbox(lines=1, value="", label="Output")

    run_button.click(
        transcribe,
        inputs=[audio_input, task_choice],
        outputs=[text_output],
        queue=False,
    )
    keepalive_button.click(cloud, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)