import gradio as gr
import torch
import whisper
from pytube import YouTube

### ────────────────────────────────────────
title = "Transcript PDF"
### ────────────────────────────────────────

# Load Whisper once at startup and keep it on the GPU when one is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
whisper_model = whisper.load_model("medium", device=device)
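# Model-size note (an assumption about the hosting hardware, not stated in
# this file): "medium" needs roughly 5 GB of VRAM; on a CPU-only Space,
# "base" or "small" are much lighter choices and load considerably faster.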
def transcribe(audio):
    print("Sending audio to Whisper ...")
    # Whisper's low-level decoding API works on a fixed 30-second window,
    # so this path transcribes only the first 30 seconds of the input.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    _, probs = whisper_model.detect_language(mel)
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)
    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)
    # Return both texts: the UI below binds two output boxes to this result.
    return transcription.text, translation.text
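# For inputs longer than 30 seconds, Whisper's high-level API chunks the
# audio internally and stitches the segments together. A minimal sketch of
# that alternative (the helper name is ours, not part of the original app):
def transcribe_full(path):
    result = whisper_model.transcribe(path, task="transcribe")
    print("Language Spoken: " + result["language"])
    return result["text"]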
def transcribe_upload(audio):
    return transcribe(audio)

def transcribe_yt(link):
    yt = YouTube(link)
    # Despite the .mp3 filename, pytube delivers the audio stream in its
    # original container (usually mp4/webm); ffmpeg decodes it either way.
    path = yt.streams.filter(only_audio=True).first().download(filename="audio.mp3")
    return transcribe(path)
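# Optional fallback (an assumption, not part of the original app): pytube
# scrapes YouTube's web player and breaks whenever the site changes, while
# yt-dlp tends to track those changes faster. A minimal sketch, assuming
# yt-dlp is installed (pip install yt-dlp); imported lazily so the app
# still runs without it:
def download_audio_ytdlp(link, out="audio.m4a"):
    import yt_dlp
    opts = {"format": "bestaudio/best", "outtmpl": out}
    with yt_dlp.YoutubeDL(opts) as ydl:
        ydl.download([link])
    return out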
with gr.Blocks(title=title) as demo:
    gr.Markdown("""
    ## Multi-lingual Transcript Generator
    """)
    gr.HTML('''
    <p style="margin-bottom: 10px">
    Save video transcripts as PDF with the help of Whisper, OpenAI's general-purpose speech recognition model, which performs multilingual speech recognition as well as speech translation and language identification.
    </p>
    ''')
    with gr.Column():
        with gr.Tab("YouTube Link"):
            yt_input = gr.Textbox(label='YouTube Link')
            transcribe_audio_yt = gr.Button('Transcribe')
        with gr.Tab("Upload Podcast as File"):
            audio_input_u = gr.Audio(label='Upload Audio', source="upload", type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')
        with gr.Row():
            transcript_output = gr.Textbox(label="Transcription in the language spoken", lines=20)
            # The second box shows Whisper's English translation of the audio.
            summary_output = gr.Textbox(label="English Translation", lines=10)
    transcribe_audio_yt.click(transcribe_yt, inputs=yt_input, outputs=[transcript_output, summary_output])
    transcribe_audio_u.click(transcribe_upload, inputs=audio_input_u, outputs=[transcript_output, summary_output])
    gr.HTML('''
    <div class="footer">
        <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
        </p>
    </div>
    ''')

demo.queue()
demo.launch()
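# To run locally (package names inferred from the imports above, not pinned
# by the original Space): pip install gradio torch openai-whisper pytube
# Whisper's load_audio also needs the ffmpeg binary on PATH. Then: python app.py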