# Whisper Speech-to-Text — Gradio Space demo app.
import gradio as gr
from transformers import pipeline

# Load the Whisper ASR pipeline once at import time so every request
# reuses the same model instead of re-initialising it per call.
model_name = "AventIQ-AI/whisper-speech-text"
stt_pipeline = pipeline("automatic-speech-recognition", model=model_name)
def transcribe(audio_path):
    """Transcribe speech to text using the Whisper ASR pipeline.

    Args:
        audio_path: Filesystem path to the uploaded/recorded audio file,
            or ``None`` when the Gradio widget received no audio.

    Returns:
        A markdown-formatted transcription string on success, or a
        user-facing warning/error message string on failure — this UI
        reports problems as text in the output box rather than raising,
        so the app never crashes on bad input.
    """
    if audio_path is None:
        return "⚠️ Please upload or record an audio file."
    try:
        # The pipeline accepts a file path directly and handles decoding.
        result = stt_pipeline(audio_path)
        return f"📝 **Transcription:**\n{result['text']}"
    except Exception as e:
        # Broad catch is deliberate: surface any failure in the textbox.
        return f"❌ Error processing audio: {str(e)}"
# Create the Gradio interface: an audio input, a read-only transcription
# textbox, and a button wiring the transcribe() callback between them.
with gr.Blocks(theme="default") as demo:
    gr.Markdown(
        """
        # 🎤 **Whisper Speech-to-Text**
        **Upload or record an audio file** and this tool will convert your speech into text using **AventIQ-AI Whisper Model**.
        Supports **MP3, WAV, FLAC** formats.
        """
    )
    with gr.Row():
        # type="filepath" hands transcribe() a path string (matching its
        # signature) rather than raw audio data.
        audio_input = gr.Audio(type="filepath", label="🎙️ Upload or Record Your Voice")
        transcribed_text = gr.Textbox(label="📝 Transcription", interactive=False)
    submit_btn = gr.Button("🎧 Transcribe", variant="primary")
    submit_btn.click(transcribe, inputs=audio_input, outputs=transcribed_text)
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()