from transformers import pipeline
import gradio as gr

# Load the fine-tuned Haitian Creole speech-to-text model as an ASR pipeline.
pipe = pipeline(model="jsbeaudry/creole-speech-to-text")


def transcribe(audio):
    # The pipeline takes an audio file path and returns the transcription text.
    text = pipe(audio)["text"]
    return text


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Whisper medium Creole",
    description="Realtime demo for Haitian Creole speech recognition using a fine-tuned Whisper medium model.",
)

iface.launch()
# Alternative: build the ASR pipeline explicitly from the model and processor.
# import torch
# import gradio as gr
# from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
# from datasets import load_dataset
#
# device = "cuda:0" if torch.cuda.is_available() else "cpu"
# torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
#
# model_id = "jsbeaudry/creole-speech-to-text"
# model = AutoModelForSpeechSeq2Seq.from_pretrained(
#     model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
# )
# model.to(device)
#
# processor = AutoProcessor.from_pretrained(model_id)
#
# pipe = pipeline(
#     "automatic-speech-recognition",
#     model=model,
#     tokenizer=processor.tokenizer,
#     feature_extractor=processor.feature_extractor,
#     torch_dtype=torch_dtype,
#     device=device,
# )
#
#
# def transcribe(audio):
#     # Use the ASR pipeline defined above.
#     text = pipe(audio)["text"]
#     return text
#
#
# iface = gr.Interface(
#     fn=transcribe,
#     inputs=gr.Audio(type="filepath"),
#     outputs="text",
#     title="Whisper medium Creole",
#     description="Realtime demo for Haitian Creole speech recognition using a fine-tuned Whisper medium model.",
# )
#
# iface.launch()