Spaces:
Runtime error
Runtime error
Commit
·
acdefab
1
Parent(s):
af7d63c
Update app.py
Browse files
app.py
CHANGED
|
@@ -321,19 +321,19 @@ with gr.Blocks() as yav_ui:
|
|
| 321 |
with gr.Row():
|
| 322 |
with gr.Column():
|
| 323 |
with gr.Tab("Youtube", id=1):
|
| 324 |
-
ysz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
| 325 |
yinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 326 |
yinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 327 |
yinput = gr.Textbox(label="Youtube Link", placeholder="https://www.youtube.com/watch?v=GECcjrYHH8w")
|
| 328 |
ybutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
| 329 |
with gr.Tab("Video", id=2):
|
| 330 |
-
vsz = gr.Dropdown(label="Model Size", choices=wispher_models
|
| 331 |
vinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 332 |
vinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 333 |
vinput = gr.Video(label="Video")
|
| 334 |
vbutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
| 335 |
with gr.Tab("Audio", id=3):
|
| 336 |
-
asz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
| 337 |
ainput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 338 |
ainput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 339 |
ainput = gr.Audio(label="Audio", type="filepath")
|
|
@@ -345,17 +345,17 @@ with gr.Blocks() as yav_ui:
|
|
| 345 |
output_json = gr.JSON(label="Transcribed JSON")
|
| 346 |
ybutton_transcribe.click(
|
| 347 |
fn=YoutubeTranscribe,
|
| 348 |
-
inputs=[yinput_nos,yinput_sn,yinput],
|
| 349 |
outputs=[output_textbox,output_json]
|
| 350 |
)
|
| 351 |
abutton_transcribe.click(
|
| 352 |
fn=AudioTranscribe,
|
| 353 |
-
inputs=[ainput_nos,ainput_sn,ainput],
|
| 354 |
outputs=[output_textbox,output_json]
|
| 355 |
)
|
| 356 |
vbutton_transcribe.click(
|
| 357 |
fn=VideoTranscribe,
|
| 358 |
-
inputs=[vinput_nos,vinput_sn,vinput],
|
| 359 |
outputs=[output_textbox,output_json]
|
| 360 |
)
|
| 361 |
yav_ui.launch(debug=True)
|
|
|
|
| 321 |
with gr.Row():
|
| 322 |
with gr.Column():
|
| 323 |
with gr.Tab("Youtube", id=1):
|
| 324 |
+
ysz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
| 325 |
yinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 326 |
yinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 327 |
yinput = gr.Textbox(label="Youtube Link", placeholder="https://www.youtube.com/watch?v=GECcjrYHH8w")
|
| 328 |
ybutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
| 329 |
with gr.Tab("Video", id=2):
|
| 330 |
+
vsz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
| 331 |
vinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 332 |
vinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 333 |
vinput = gr.Video(label="Video")
|
| 334 |
vbutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
| 335 |
with gr.Tab("Audio", id=3):
|
| 336 |
+
asz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
| 337 |
ainput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
| 338 |
ainput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
| 339 |
ainput = gr.Audio(label="Audio", type="filepath")
|
|
|
|
| 345 |
output_json = gr.JSON(label="Transcribed JSON")
|
| 346 |
ybutton_transcribe.click(
|
| 347 |
fn=YoutubeTranscribe,
|
| 348 |
+
inputs=[yinput_nos,yinput_sn,yinput, ysz],
|
| 349 |
outputs=[output_textbox,output_json]
|
| 350 |
)
|
| 351 |
abutton_transcribe.click(
|
| 352 |
fn=AudioTranscribe,
|
| 353 |
+
inputs=[ainput_nos,ainput_sn,ainput, asz],
|
| 354 |
outputs=[output_textbox,output_json]
|
| 355 |
)
|
| 356 |
vbutton_transcribe.click(
|
| 357 |
fn=VideoTranscribe,
|
| 358 |
+
inputs=[vinput_nos,vinput_sn,vinput, vsz],
|
| 359 |
outputs=[output_textbox,output_json]
|
| 360 |
)
|
| 361 |
yav_ui.launch(debug=True)
|