raihanrifaldi committed on
Commit
8bedf81
·
1 Parent(s): 7e18c2b

kembali ke file sebelumnya

Browse files
Files changed (1) hide show
  1. app.py +147 -146
app.py CHANGED
@@ -29,105 +29,105 @@ import psutil
29
 
30
  whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
31
  source_languages = {
32
- "English": "English",
33
- # "zh": "Chinese",
34
- # "de": "German",
35
- # "es": "Spanish",
36
- # "ru": "Russian",
37
- # "ko": "Korean",
38
- # "fr": "French",
39
- "Japan": "Japanese",
40
- # "pt": "Portuguese",
41
- # "tr": "Turkish",
42
- # "pl": "Polish",
43
- # "ca": "Catalan",
44
- # "nl": "Dutch",
45
- # "ar": "Arabic",
46
- # "sv": "Swedish",
47
- # "it": "Italian",
48
- "Indonesia": "Indonesian",
49
- # "hi": "Hindi",
50
- # "fi": "Finnish",
51
- # "vi": "Vietnamese",
52
- # "he": "Hebrew",
53
- # "uk": "Ukrainian",
54
- # "el": "Greek",
55
- "Malaysia": "Malay",
56
- # "cs": "Czech",
57
- # "ro": "Romanian",
58
- # "da": "Danish",
59
- # "hu": "Hungarian",
60
- # "ta": "Tamil",
61
- # "no": "Norwegian",
62
- # "th": "Thai",
63
- # "ur": "Urdu",
64
- # "hr": "Croatian",
65
- # "bg": "Bulgarian",
66
- # "lt": "Lithuanian",
67
- # "la": "Latin",
68
- # "mi": "Maori",
69
- # "ml": "Malayalam",
70
- # "cy": "Welsh",
71
- # "sk": "Slovak",
72
- # "te": "Telugu",
73
- # "fa": "Persian",
74
- # "lv": "Latvian",
75
- # "bn": "Bengali",
76
- # "sr": "Serbian",
77
- # "az": "Azerbaijani",
78
- # "sl": "Slovenian",
79
- # "kn": "Kannada",
80
- # "et": "Estonian",
81
- # "mk": "Macedonian",
82
- # "br": "Breton",
83
- # "eu": "Basque",
84
- # "is": "Icelandic",
85
- # "hy": "Armenian",
86
- # "ne": "Nepali",
87
- # "mn": "Mongolian",
88
- # "bs": "Bosnian",
89
- # "kk": "Kazakh",
90
- # "sq": "Albanian",
91
- # "sw": "Swahili",
92
- # "gl": "Galician",
93
- # "mr": "Marathi",
94
- # "pa": "Punjabi",
95
- # "si": "Sinhala",
96
- # "km": "Khmer",
97
- # "sn": "Shona",
98
- # "yo": "Yoruba",
99
- # "so": "Somali",
100
- # "af": "Afrikaans",
101
- # "oc": "Occitan",
102
- # "ka": "Georgian",
103
- # "be": "Belarusian",
104
- # "tg": "Tajik",
105
- # "sd": "Sindhi",
106
- # "gu": "Gujarati",
107
- # "am": "Amharic",
108
- # "yi": "Yiddish",
109
- # "lo": "Lao",
110
- # "uz": "Uzbek",
111
- # "fo": "Faroese",
112
- # "ht": "Haitian creole",
113
- # "ps": "Pashto",
114
- # "tk": "Turkmen",
115
- # "nn": "Nynorsk",
116
- # "mt": "Maltese",
117
- # "sa": "Sanskrit",
118
- # "lb": "Luxembourgish",
119
- # "my": "Myanmar",
120
- # "bo": "Tibetan",
121
- # "tl": "Tagalog",
122
- # "mg": "Malagasy",
123
- # "as": "Assamese",
124
- # "tt": "Tatar",
125
- # "haw": "Hawaiian",
126
- # "ln": "Lingala",
127
- # "ha": "Hausa",
128
- # "ba": "Bashkir",
129
- # "jw": "Javanese",
130
- # "su": "Sundanese",
131
  }
132
 
133
  source_language_list = [key[0] for key in source_languages.items()]
@@ -227,7 +227,9 @@ def speech_to_text(video_file_path, selected_source_lang, whisper_model, num_spe
227
  Speaker diarization model and pipeline from https://github.com/pyannote/pyannote-audio
228
  """
229
 
230
- model = WhisperModel(whisper_model, device="cuda", compute_type="int8_float16")
 
 
231
  time_start = time.time()
232
  if(video_file_path == None):
233
  raise ValueError("Error no video input")
@@ -349,13 +351,13 @@ video_in = gr.Video(label="Video file", mirror_webcam=False)
349
  youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True)
350
  df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text'])
351
  memory = psutil.virtual_memory()
352
- selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="Indonesia", label="Bahasa yang digunakan dalam Video", interactive=True)
353
- selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Whisper model", interactive=True)
354
- number_speakers = gr.Number(precision=0, value=0, label="Masukkan jumlah pembicara untuk hasil yang lebih baik. *Jika nilai=0, model akan otomatis menemukan jumlah pembicara terbaik", interactive=True)
355
  system_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*")
356
  download_transcript = gr.File(label="Download transcript")
357
  transcription_df = gr.DataFrame(value=df_init,label="Transcription dataframe", row_count=(0, "dynamic"), max_rows = 10, wrap=True, overflow_row_behaviour='paginate')
358
- title = "SPERCO"
359
  demo = gr.Blocks(title=title)
360
  demo.encrypt = False
361
 
@@ -364,19 +366,18 @@ with demo:
364
  with gr.Tab("Whisper speaker diarization"):
365
  gr.Markdown('''
366
  <div>
367
- <h1 style='text-align: center'>SPERCO</h1>
368
- Teknologi ini menggunakan Whisper models dari <a href='https://github.com/openai/whisper' target='_blank'><b>OpenAI</b></a> with <a href='https://github.com/guillaumekln/faster-whisper' target='_blank'><b>CTranslate2</b></a> yang merupakan mesin inferensi cepat untuk model Transformer untuk mengenali ucapan (4 kali lebih cepat dari model openai asli dengan akurasi yang sama)
369
- dan model ECAPA-TDNN dari <a href='https://github.com/speechbrain/speechbrain' target='_blank'><b>SpeechBrain</b></a> untuk mengkodekan dan mengklasifikasikan pembicara.
370
-
371
  </div>
372
  ''')
373
 
374
  with gr.Row():
375
  gr.Markdown('''
376
  ### Transcribe youtube link using OpenAI Whisper
377
- ##### 1. Menggunakan model Whisper dari OpenAI untuk memisahkan audio menjadi segmen dan menghasilkan transkripsi.
378
- ##### 2. Menghasilkan pengkodan pembicara untuk setiap segmen.
379
- ##### 3. Menerapkan pengelompokan aglomeratif pada pengkodean untuk mengidentifikasi pembicara untuk setiap segmen.
380
  ''')
381
 
382
  with gr.Row():
@@ -432,41 +433,41 @@ with demo:
432
 
433
 
434
 
435
- # with gr.Tab("Whisper Transcribe Japanese Audio"):
436
- # gr.Markdown(f'''
437
- # <div>
438
- # <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
439
- # </div>
440
- # Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
441
- # checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
442
- # ''')
443
- # microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
444
- # upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
445
- # transcribe_btn = gr.Button("Transcribe Audio")
446
- # text_output = gr.Textbox()
447
- # with gr.Row():
448
- # gr.Markdown('''
449
- # ### You can test by following examples:
450
- # ''')
451
- # examples = gr.Examples(examples=
452
- # [ "sample1.wav",
453
- # "sample2.wav",
454
- # ],
455
- # label="Examples", inputs=[upload])
456
- # transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
457
 
458
- # with gr.Tab("Whisper Transcribe Japanese YouTube"):
459
- # gr.Markdown(f'''
460
- # <div>
461
- # <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
462
- # </div>
463
- # Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
464
- # <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
465
- # ''')
466
- # youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
467
- # yt_transcribe_btn = gr.Button("Transcribe YouTube")
468
- # text_output2 = gr.Textbox()
469
- # html_output = gr.Markdown()
470
- # yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])
471
 
472
  demo.launch(debug=True)
 
29
 
30
  whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
31
  source_languages = {
32
+ "en": "English",
33
+ "zh": "Chinese",
34
+ "de": "German",
35
+ "es": "Spanish",
36
+ "ru": "Russian",
37
+ "ko": "Korean",
38
+ "fr": "French",
39
+ "ja": "Japanese",
40
+ "pt": "Portuguese",
41
+ "tr": "Turkish",
42
+ "pl": "Polish",
43
+ "ca": "Catalan",
44
+ "nl": "Dutch",
45
+ "ar": "Arabic",
46
+ "sv": "Swedish",
47
+ "it": "Italian",
48
+ "id": "Indonesian",
49
+ "hi": "Hindi",
50
+ "fi": "Finnish",
51
+ "vi": "Vietnamese",
52
+ "he": "Hebrew",
53
+ "uk": "Ukrainian",
54
+ "el": "Greek",
55
+ "ms": "Malay",
56
+ "cs": "Czech",
57
+ "ro": "Romanian",
58
+ "da": "Danish",
59
+ "hu": "Hungarian",
60
+ "ta": "Tamil",
61
+ "no": "Norwegian",
62
+ "th": "Thai",
63
+ "ur": "Urdu",
64
+ "hr": "Croatian",
65
+ "bg": "Bulgarian",
66
+ "lt": "Lithuanian",
67
+ "la": "Latin",
68
+ "mi": "Maori",
69
+ "ml": "Malayalam",
70
+ "cy": "Welsh",
71
+ "sk": "Slovak",
72
+ "te": "Telugu",
73
+ "fa": "Persian",
74
+ "lv": "Latvian",
75
+ "bn": "Bengali",
76
+ "sr": "Serbian",
77
+ "az": "Azerbaijani",
78
+ "sl": "Slovenian",
79
+ "kn": "Kannada",
80
+ "et": "Estonian",
81
+ "mk": "Macedonian",
82
+ "br": "Breton",
83
+ "eu": "Basque",
84
+ "is": "Icelandic",
85
+ "hy": "Armenian",
86
+ "ne": "Nepali",
87
+ "mn": "Mongolian",
88
+ "bs": "Bosnian",
89
+ "kk": "Kazakh",
90
+ "sq": "Albanian",
91
+ "sw": "Swahili",
92
+ "gl": "Galician",
93
+ "mr": "Marathi",
94
+ "pa": "Punjabi",
95
+ "si": "Sinhala",
96
+ "km": "Khmer",
97
+ "sn": "Shona",
98
+ "yo": "Yoruba",
99
+ "so": "Somali",
100
+ "af": "Afrikaans",
101
+ "oc": "Occitan",
102
+ "ka": "Georgian",
103
+ "be": "Belarusian",
104
+ "tg": "Tajik",
105
+ "sd": "Sindhi",
106
+ "gu": "Gujarati",
107
+ "am": "Amharic",
108
+ "yi": "Yiddish",
109
+ "lo": "Lao",
110
+ "uz": "Uzbek",
111
+ "fo": "Faroese",
112
+ "ht": "Haitian creole",
113
+ "ps": "Pashto",
114
+ "tk": "Turkmen",
115
+ "nn": "Nynorsk",
116
+ "mt": "Maltese",
117
+ "sa": "Sanskrit",
118
+ "lb": "Luxembourgish",
119
+ "my": "Myanmar",
120
+ "bo": "Tibetan",
121
+ "tl": "Tagalog",
122
+ "mg": "Malagasy",
123
+ "as": "Assamese",
124
+ "tt": "Tatar",
125
+ "haw": "Hawaiian",
126
+ "ln": "Lingala",
127
+ "ha": "Hausa",
128
+ "ba": "Bashkir",
129
+ "jw": "Javanese",
130
+ "su": "Sundanese",
131
  }
132
 
133
  source_language_list = [key[0] for key in source_languages.items()]
 
227
  Speaker diarization model and pipeline from https://github.com/pyannote/pyannote-audio
228
  """
229
 
230
+ # model = whisper.load_model(whisper_model)
231
+ # model = WhisperModel(whisper_model, device="cuda", compute_type="int8_float16")
232
+ model = WhisperModel(whisper_model, compute_type="int8")
233
  time_start = time.time()
234
  if(video_file_path == None):
235
  raise ValueError("Error no video input")
 
351
  youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True)
352
  df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text'])
353
  memory = psutil.virtual_memory()
354
+ selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="en", label="Spoken language in video", interactive=True)
355
+ selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Selected Whisper model", interactive=True)
356
+ number_speakers = gr.Number(precision=0, value=0, label="Input number of speakers for better results. If value=0, model will automatic find the best number of speakers", interactive=True)
357
  system_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*")
358
  download_transcript = gr.File(label="Download transcript")
359
  transcription_df = gr.DataFrame(value=df_init,label="Transcription dataframe", row_count=(0, "dynamic"), max_rows = 10, wrap=True, overflow_row_behaviour='paginate')
360
+ title = "Whisper speaker diarization"
361
  demo = gr.Blocks(title=title)
362
  demo.encrypt = False
363
 
 
366
  with gr.Tab("Whisper speaker diarization"):
367
  gr.Markdown('''
368
  <div>
369
+ <h1 style='text-align: center'>Whisper speaker diarization</h1>
370
+ This space uses Whisper models from <a href='https://github.com/openai/whisper' target='_blank'><b>OpenAI</b></a> with <a href='https://github.com/guillaumekln/faster-whisper' target='_blank'><b>CTranslate2</b></a> which is a fast inference engine for Transformer models to recognize the speech (4 times faster than original openai model with same accuracy)
371
+ and ECAPA-TDNN model from <a href='https://github.com/speechbrain/speechbrain' target='_blank'><b>SpeechBrain</b></a> to encode and classify speakers
 
372
  </div>
373
  ''')
374
 
375
  with gr.Row():
376
  gr.Markdown('''
377
  ### Transcribe youtube link using OpenAI Whisper
378
+ ##### 1. Using OpenAI's Whisper model to separate audio into segments and generate transcripts.
379
+ ##### 2. Generating speaker embeddings for each segments.
380
+ ##### 3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment.
381
  ''')
382
 
383
  with gr.Row():
 
433
 
434
 
435
 
436
+ with gr.Tab("Whisper Transcribe Japanese Audio"):
437
+ gr.Markdown(f'''
438
+ <div>
439
+ <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
440
+ </div>
441
+ Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
442
+ checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
443
+ ''')
444
+ microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
445
+ upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
446
+ transcribe_btn = gr.Button("Transcribe Audio")
447
+ text_output = gr.Textbox()
448
+ with gr.Row():
449
+ gr.Markdown('''
450
+ ### You can test by following examples:
451
+ ''')
452
+ examples = gr.Examples(examples=
453
+ [ "sample1.wav",
454
+ "sample2.wav",
455
+ ],
456
+ label="Examples", inputs=[upload])
457
+ transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
458
 
459
+ with gr.Tab("Whisper Transcribe Japanese YouTube"):
460
+ gr.Markdown(f'''
461
+ <div>
462
+ <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
463
+ </div>
464
+ Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
465
+ <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
466
+ ''')
467
+ youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
468
+ yt_transcribe_btn = gr.Button("Transcribe YouTube")
469
+ text_output2 = gr.Textbox()
470
+ html_output = gr.Markdown()
471
+ yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])
472
 
473
  demo.launch(debug=True)