import gradio as gr
import torch
import whisper
from whisper.utils import write_vtt
from pytube import YouTube
### ----------------------------------------
title = "Transcript PDF"
### ----------------------------------------
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
whisper_model = whisper.load_model("medium", device=device)
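# A note on checkpoints (not part of the original app): Whisper also ships
# "tiny", "base", "small", and "large" models that trade accuracy for speed
# and memory. A minimal sketch for CPU-only hosts could pick a lighter one:
#
#   model_name = "medium" if torch.cuda.is_available() else "base"
#   whisper_model = whisper.load_model(model_name, device=device)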
def transcribe(audio):
    print("""
    ---
    Sending audio to Whisper ...
    ---
    """)
    # Earlier experiments with the lower-level decoding API, kept for reference:
    # audio = whisper.load_audio(audio)
    # audio = whisper.pad_or_trim(audio)
    # mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    # _, probs = whisper_model.detect_language(mel)
    # transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    # translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    # transcription = whisper.decode(whisper_model, mel, transcript_options)
    # translation = whisper.decode(whisper_model, mel, translate_options)
    result = whisper_model.transcribe(audio)
    # print("Language Spoken: " + transcription.language)
    # print("Transcript: " + transcription.text)
    # print("Translated: " + translation.text)
    # Write the timed segments out as WebVTT subtitles alongside the plain text
    with open("sub.vtt", "w") as f:
        write_vtt(result["segments"], file=f)
    return result["text"], "sub.vtt"
def transcribe_upload(audio):
    return transcribe(audio)
def transcribe_yt(link):
    yt = YouTube(link)
    # Download the first audio-only stream; despite the .mp3 name the file is
    # whatever container YouTube serves, which ffmpeg (used internally by
    # whisper.load_audio) identifies from the content, not the extension.
    path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp3")
    return transcribe(path)
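# pytube downloads can fail on age-restricted, private, or region-locked
# videos. A minimal defensive wrapper (hypothetical, not wired into the UI):
#
#   from pytube.exceptions import PytubeError
#   def transcribe_yt_safe(link):
#       try:
#           return transcribe_yt(link)
#       except PytubeError as e:
#           return f"Could not fetch video: {e}", None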
css = """
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: black;
background: black;
}
input[type='range'] {
accent-color: black;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 880px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
#advanced-btn {
font-size: .7rem !important;
line-height: 19px;
margin-top: 12px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
display: none;
margin-bottom: 20px;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown("""
    ## Transcript Generator
    """)
    gr.HTML('''
    <p style="margin-bottom: 10px">
        Save transcripts of videos as PDF with the help of Whisper, a general-purpose
        speech recognition model released by OpenAI that performs multilingual speech
        recognition as well as speech translation and language identification.
    </p>
    ''')
    with gr.Column():
        # gr.Markdown(""" ### Record audio """)
        with gr.Tab("YouTube Link"):
            yt_input = gr.Textbox(label='YouTube Link')
            transcribe_audio_yt = gr.Button('Transcribe')
        with gr.Tab("Upload as File"):
            audio_input_u = gr.Audio(label='Upload Audio', source="upload", type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')
        with gr.Row():
            transcript_output = gr.Textbox(label="Transcript", lines=20)
            transcript_file = gr.File()
    transcribe_audio_yt.click(transcribe_yt, inputs=yt_input, outputs=[transcript_output, transcript_file])
    transcribe_audio_u.click(transcribe_upload, inputs=audio_input_u, outputs=[transcript_output, transcript_file])
    gr.HTML('''
    <div class="footer">
        <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
        </p>
    </div>
    ''')
demo.queue()
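# queue() routes requests through a queue so long transcriptions are not cut
# off by HTTP timeouts; passing share=True to launch() would additionally
# expose a temporary public URL (an optional tweak, not used here).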
demo.launch()