|
import gradio as gr
|
|
from transformers import pipeline
|
|
|
|
|
|
# Hugging Face model ID of the fine-tuned Whisper-small ASR checkpoint.
model_name = "AventIQ-AI/whisper_small_Automatic_speech_recognition"

# Build the speech-recognition pipeline once at import time
# (downloads/caches the model weights on first run).
asr_pipeline = pipeline("automatic-speech-recognition", model=model_name)
|
|
|
|
def transcribe_audio(audio):
    """Transcribe an audio file to text with the Whisper ASR pipeline.

    Args:
        audio: Filesystem path to the recorded/uploaded audio file
            (Gradio ``type="filepath"``), or ``None`` when the user
            submitted nothing.

    Returns:
        The transcribed text, or a warning message when no audio was
        provided or no speech was detected.
    """
    if audio is None:
        # Original string was mojibake ("β οΈ ..."); restored to "⚠️".
        return "⚠️ Please upload or record an audio file."

    # The pipeline returns a dict like {"text": "..."}; Whisper output
    # often carries a leading space, so strip incidental whitespace.
    transcript = asr_pipeline(audio)["text"].strip()

    return transcript if transcript else "⚠️ No speech detected."
|
|
|
|
|
|
# --- Gradio UI ------------------------------------------------------------
# Mojibake labels ("π€", "ποΈ", "π") restored to real emoji.
with gr.Blocks() as demo:
    gr.Markdown("## 🎤 Whisper Small - Speech to Text")
    gr.Markdown("Upload an audio file or record your voice to get a transcript.")

    # type="filepath" passes the audio to the callback as a file path.
    audio_input = gr.Audio(type="filepath", interactive=True, label="🎙️ Upload or Record Audio")
    transcribe_button = gr.Button("🔍 Transcribe")
    output_text = gr.Textbox(label="📝 Transcription Output")

    transcribe_button.click(transcribe_audio, inputs=audio_input, outputs=output_text)


# Launch the web server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()