import gradio as gr
from transformers import pipeline

# Load models
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base")
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

# Function to process audio
def process_audio(audio_file):
    # Step 1: Transcribe the audio
    transcription = transcriber(audio_file)["text"]

    # Step 2: Summarize the transcription
    summary = summarizer(transcription, max_length=50, min_length=10, do_sample=False)[0]["summary_text"]

    return transcription, summary

# Gradio Interface
interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File"),  # Gradio 4+ uses `sources` (a list) instead of `source`
    outputs=[
        gr.Textbox(label="Full Transcription"),
        gr.Textbox(label="Summary")
    ],
    title="Audio Transcription and Summarization",
    description="Upload an audio file to get a full transcription and a brief summary of its content."
)

# Launch the interface
interface.launch()
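A quick way to verify that the two models chain correctly is to call process_audio directly, without launching the UI. The snippet below is a minimal sketch that assumes a hypothetical sample.wav in the working directory; run it in place of interface.launch(), or in a notebook after the definitions above.

# Minimal local sanity check (sample.wav is a placeholder path)
if __name__ == "__main__":
    transcription, summary = process_audio("sample.wav")
    print("Transcription:", transcription)
    print("Summary:", summary)

Because process_audio returns a two-element tuple, Gradio maps the first value to the "Full Transcription" textbox and the second to the "Summary" textbox, in order.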