# (removed: non-Python web-scrape artifacts — file-size banner, git commit
#  hashes, and a stray column of line numbers — that preceded the code and
#  would have been a syntax error)
import gradio as gr
import whisper
from faster_whisper import WhisperModel

# Whisper model size loaded at startup; may be swapped at runtime from the UI
# dropdown (see speech_to_text, which reloads `model` on a size change).
model_size = 'large-v3'
#model = whisper.load_model(model_size)
#model = WhisperModel(model_size, device="cuda", compute_type="float16")
# Load via faster-whisper (CTranslate2 backend). No device is given, so the
# library picks one itself. NOTE(review): compute_type="float16" presumably
# assumes GPU inference — CPU backends may not support fp16; confirm target HW.
model = WhisperModel(model_size, compute_type="float16")

# or run on GPU with INT8
# model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
# or run on CPU with INT8
# model = WhisperModel(model_size, device="cpu", compute_type="int8")

def speech_to_text(audio_file, _model_size):
    """Transcribe an audio file to text with the shared faster-whisper model.

    Parameters
    ----------
    audio_file : str
        Path to the audio file (the gr.Audio input uses type="filepath").
    _model_size : str
        Model size chosen in the UI dropdown. If it differs from the
        currently loaded size, the global model is reloaded first.

    Returns
    -------
    str
        The concatenated text of all transcribed segments.
    """
    global model_size, model
    # Reload lazily, only when the requested size changed: loading a Whisper
    # model is expensive, so we keep a single module-level instance instead
    # of constructing one per request.
    if model_size != _model_size:
        model_size = _model_size
        model = WhisperModel(model_size, compute_type="float16")

    # transcribe() yields segments lazily alongside an info object; the info
    # (language, duration, ...) is not needed here.
    segments, _info = model.transcribe(audio_file, beam_size=5)

    # Generator expression: no need to materialize an intermediate list
    # before joining. Decoding happens while the segments are iterated.
    return "".join(segment.text for segment in segments)

# Wire up the Gradio UI: an uploaded audio file and a model-size selector
# feed speech_to_text; the transcript is rendered as plain text.
audio_input = gr.Audio(source="upload", type="filepath")
size_input = gr.Dropdown(
    value=model_size,
    choices=["tiny", "base", "small", "medium", "large", "large-v2", "large-v3"],
)
demo = gr.Interface(
    fn=speech_to_text,
    inputs=[audio_input, size_input],
    outputs="text",
)
demo.launch()