Shad0ws committed
Commit 547133d · Parent: e208a40

Update app.py

Files changed (1):
  1. app.py +5 -6
app.py CHANGED
@@ -345,16 +345,15 @@ def create_ui(input_audio_max_duration, share=False, server_name: str = None, se
     ui.set_parallel_devices(vad_parallel_devices)
     ui.set_auto_parallel(auto_parallel)
 
-    # ui_description = "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse "
-    # ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition "
-    # ui_description += " as well as speech translation and language identification. "
-
-    # ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option."
+    ui_description = "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse "
+    ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition "
+    ui_description += " as well as speech translation and language identification. "
+    ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option."
 
     if input_audio_max_duration > 0:
         ui_description += "\n\n" + "Max audio file length: " + str(input_audio_max_duration) + " s"
 
-    ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)"
+    # ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)"
 
     demo = gr.Interface(fn=ui.transcribe_webui, description=ui_description, article=ui_article, inputs=[
         gr.Dropdown(choices=WHISPER_MODELS, value=default_model_name, label="Model"),
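
For context only, a minimal sketch of how the `description` and `article` strings edited above are consumed by `gr.Interface`. This is not the Space's actual code: the `transcribe` stub is a hypothetical stand-in for `ui.transcribe_webui`, and the input/output components are assumptions; Gradio renders both strings as Markdown in the launched app.

```python
# Hypothetical, self-contained sketch; not part of this commit.
import gradio as gr

ui_description = "Whisper is a general-purpose speech recognition model. "
ui_description += "It can perform multilingual speech recognition, speech translation and language identification."
ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)"

def transcribe(audio_path: str) -> str:
    # Stand-in for ui.transcribe_webui; the real app runs Whisper here.
    return f"Transcription of {audio_path} would appear here."

demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath", label="Audio"),
    outputs=gr.Textbox(label="Transcription"),
    description=ui_description,  # rendered as Markdown under the app title
    article=ui_article,          # rendered as Markdown below the interface
)

if __name__ == "__main__":
    demo.launch()
```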