# app.py — Hugging Face Space: audio transcription, translation & summary.
# Provenance (from the HF file page): user jaisun2004, "Update app.py",
# commit 2e174b1 (verified), 2.07 kB.
from functools import lru_cache

import gradio as gr
from langdetect import detect
from transformers import pipeline
@lru_cache(maxsize=1)
def _get_asr_pipeline():
    """Build the Whisper ASR pipeline once; cached so the multi-GB model
    is not reloaded on every request."""
    return pipeline("automatic-speech-recognition", model="openai/whisper-large")


@lru_cache(maxsize=1)
def _get_summarizer():
    """Build the BART summarization pipeline once; cached for the same reason."""
    return pipeline("summarization", model="facebook/bart-large-cnn")


def process_audio(audio_path):
    """Transcribe an audio file, detect its language, translate it to
    English if needed, and summarize the English transcript.

    Parameters
    ----------
    audio_path : str
        Filesystem path to the uploaded audio file (MP3/WAV, as provided
        by ``gr.Audio(type="filepath")``).

    Returns
    -------
    tuple[str, str, str, str]
        ``(language name, original transcript, English transcript,
        summary)``.  If transcription fails, the first element carries the
        error message and the remaining three are empty strings; later
        stage failures are reported inside their own slot.
    """
    try:
        # Transcription in the audio's original language.
        transcript = _get_asr_pipeline()(audio_path)["text"]
    except Exception as e:
        return f"Error in transcription: {e}", "", "", ""

    # langdetect raises on empty/ambiguous text; degrade to "unknown".
    try:
        detected_lang = detect(transcript)
    except Exception:
        detected_lang = "unknown"
    lang_map = {'en': 'English', 'hi': 'Hindi', 'ta': 'Tamil'}
    lang_text = lang_map.get(detected_lang, detected_lang)

    transcript_en = transcript
    if detected_lang != "en":
        try:
            # Whisper's built-in speech translation.  BUG FIX: the original
            # passed task="translate" to pipeline(), which collides with the
            # positional `task` argument ("automatic-speech-recognition")
            # and raises TypeError.  The translate task must be routed
            # through generate_kwargs instead.
            result_translate = _get_asr_pipeline()(
                audio_path, generate_kwargs={"task": "translate"}
            )
            transcript_en = result_translate["text"]
        except Exception as e:
            transcript_en = f"Error translating: {e}"

    try:
        summary = _get_summarizer()(
            transcript_en, max_length=100, min_length=30, do_sample=False
        )
        summary_text = summary[0]["summary_text"]
    except Exception as e:
        summary_text = f"Error summarizing: {e}"

    return lang_text, transcript, transcript_en, summary_text
# ---- Gradio UI ---------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Audio Transcript, Translation & Summary (Whisper + Hugging Face)")

    # Input side: file upload plus a trigger button.
    audio_input = gr.Audio(source="upload", type="filepath", label="Upload MP3/WAV Audio")
    run_button = gr.Button("Process")

    # Output side: one textbox per value returned by process_audio.
    lang_out = gr.Textbox(label="Detected Language")
    transcript_out = gr.Textbox(label="Original Transcript")
    transcript_en_out = gr.Textbox(label="English Transcript (if translated)")
    summary_out = gr.Textbox(label="Summary")

    run_button.click(
        process_audio,
        inputs=[audio_input],
        outputs=[lang_out, transcript_out, transcript_en_out, summary_out],
    )

demo.launch()