Commit d099ce5 (parent: d4a060a): Update app.py

app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import torch
 import whisper
+from whisper.utils import write_vtt
 import requests
 from pytube import YouTube
 
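Note: the new import assumes whisper.utils exposes write_vtt, which early openai-whisper releases did; later releases replaced these helpers with writer classes such as whisper.utils.WriteVTT, so this import is tied to the installed version.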
@@ -38,9 +39,11 @@ def transcribe(audio):
     #print("Language Spoken: " + transcription.language)
     #print("Transcript: " + transcription.text)
     #print("Translated: " + translation.text)
+    with open('sub.vtt', "w") as f:
+        write_vtt(result["segments"], file=f)
 
 
-    return result["text"]
+    return result["text"], "sub.vtt"
 
 def transcribe_upload(audio):
     return transcribe(audio)
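The new lines write Whisper's timestamped segments to sub.vtt alongside the plain transcript, and transcribe() now returns a (text, file path) pair. A minimal standalone sketch of the same pattern, assuming the early-release write_vtt helper; the model size and audio path are illustrative, not from the app:

import whisper
from whisper.utils import write_vtt

model = whisper.load_model("base")      # illustrative model size
result = model.transcribe("audio.mp3")  # illustrative input path

# result["segments"] is a list of dicts carrying "start", "end", and "text";
# write_vtt serializes them as WebVTT cues to the open file handle.
with open("sub.vtt", "w") as f:
    write_vtt(result["segments"], file=f)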
@@ -151,9 +154,10 @@ with gr.Blocks(css = css) as demo:
 
     with gr.Row():
         transcript_output = gr.Textbox(label="Transcription in the language spoken", lines = 20)
+        transcript_file = gr.File()
 
-    transcribe_audio_yt.click(transcribe_yt, inputs = yt_input, outputs = transcript_output)
-    transcribe_audio_u.click(transcribe_upload, inputs = audio_input_u, outputs = transcript_output)
+    transcribe_audio_yt.click(transcribe_yt, inputs = yt_input, outputs = [transcript_output, transcript_file])
+    transcribe_audio_u.click(transcribe_upload, inputs = audio_input_u, outputs = [transcript_output, transcript_file])
     gr.HTML('''
         <div class="footer">
         <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
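With a list of two outputs, Gradio maps the handler's two return values in order: the string fills the Textbox and the returned path populates the new gr.File component as a download. A minimal sketch of that wiring, with illustrative component and function names rather than the app's own:

import gradio as gr

def fake_transcribe(audio_path):
    # stand-in for the real handler: write a stub subtitle file,
    # then return (transcript text, subtitle file path)
    with open("sub.vtt", "w") as f:
        f.write("WEBVTT\n\n00:00.000 --> 00:02.000\nhello world\n")
    return "hello world", "sub.vtt"

with gr.Blocks() as demo:
    audio_in = gr.Audio(type="filepath")
    text_out = gr.Textbox(lines=20)
    file_out = gr.File()
    gr.Button("Transcribe").click(fake_transcribe, inputs=audio_in, outputs=[text_out, file_out])

demo.launch()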