sungo-ganpare committed on
Commit 21b4fcb · 1 Parent(s): 326f6f8

Reflect various changes

Files changed (1)
  1. app.py +461 -39
app.py CHANGED
@@ -1,70 +1,492 @@
 import torch
 import gradio as gr
 import spaces
 import shutil
 from pathlib import Path
 from pydub import AudioSegment
 import numpy as np
 import os
-import gc
-from nemo.collections.asr.models import ASRModel

 device = "cuda" if torch.cuda.is_available() else "cpu"
-MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"

 model = ASRModel.from_pretrained(model_name=MODEL_NAME)
-model = model.to(device).eval()

 @spaces.GPU
-def transcribe(audio_path):
     if not audio_path:
-        return [["Error", "Error", "No file"]], [["0.0", "0.0", ""]], "", [["0.0", "0.0", ""]]

-    audio = AudioSegment.from_file(audio_path)
-    audio = audio.set_frame_rate(16000).set_channels(1)
-    temp_path = "temp.wav"
-    audio.export(temp_path, format="wav")

-    model.to(torch.bfloat16)
-    output = model.transcribe([temp_path], timestamps=True)

-    segments = output[0].timestamp.get("segment", [])
-    chars = output[0].timestamp.get("char", [])

-    segment_data = [[f"{s['start']:.2f}", f"{s['end']:.2f}", s['segment']] for s in segments]
-    char_data = [[f"{c['start']:.2f}", f"{c['end']:.2f}", c['char']] for c in chars]

-    os.remove(temp_path)
-    return segment_data, char_data, output[0].text, char_data

-def end_session():
-    print("Session cleanup complete.")
-    gc.collect()
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()

-with gr.Blocks() as demo:
-    gr.Markdown("## 🎙️ Parakeet-TDT ASR with Segment + Character Timestamps")

-    with gr.Row():
-        audio_input = gr.Audio(type="filepath", label="Upload Audio File")
-        transcribe_btn = gr.Button("Transcribe")

-    output_text = gr.Textbox(label="Full Transcription")

     with gr.Tabs():
-        with gr.TabItem("Segment Timestamps"):
-            segment_df = gr.DataFrame(headers=["Start", "End", "Segment"], wrap=True)
-        with gr.TabItem("Character Timestamps"):
-            char_df = gr.DataFrame(headers=["Start", "End", "Char"], wrap=False)
-
-    transcribe_btn.click(
-        fn=transcribe,
-        inputs=[audio_input],
-        outputs=[segment_df, char_df, output_text, char_df]
     )

     demo.unload(end_session)

-demo.launch()
+from nemo.collections.asr.models import ASRModel
 import torch
 import gradio as gr
 import spaces
+import gc
 import shutil
 from pathlib import Path
 from pydub import AudioSegment
 import numpy as np
 import os
+import gradio.themes as gr_themes
+import csv
+import json

 device = "cuda" if torch.cuda.is_available() else "cpu"
+MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"

 model = ASRModel.from_pretrained(model_name=MODEL_NAME)
+model.eval()
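+
+# Per-session scratch space: each Gradio session gets its own directory under /tmp,
+# created on page load (demo.load -> start_session) and deleted on unload (demo.unload -> end_session).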
+def start_session(request: gr.Request):
+    session_hash = request.session_hash
+    session_dir = Path(f'/tmp/{session_hash}')
+    session_dir.mkdir(parents=True, exist_ok=True)
+    print(f"Session with hash {session_hash} started.")
+    return session_dir.as_posix()
+
+def end_session(request: gr.Request):
+    session_hash = request.session_hash
+    session_dir = Path(f'/tmp/{session_hash}')
+    if session_dir.exists():
+        shutil.rmtree(session_dir)
+    print(f"Session with hash {session_hash} ended.")
+def get_audio_segment(audio_path, start_second, end_second):
+    if not audio_path or not Path(audio_path).exists():
+        print(f"Warning: Audio path '{audio_path}' not found or invalid for clipping.")
+        return None
+    try:
+        start_ms = int(start_second * 1000)
+        end_ms = int(end_second * 1000)
+
+        start_ms = max(0, start_ms)
+        if end_ms <= start_ms:
+            print(f"Warning: End time ({end_second}s) is not after start time ({start_second}s). Adjusting end time.")
+            end_ms = start_ms + 100
+
+        audio = AudioSegment.from_file(audio_path)
+        clipped_audio = audio[start_ms:end_ms]
+
+        samples = np.array(clipped_audio.get_array_of_samples())
+        if clipped_audio.channels == 2:
+            samples = samples.reshape((-1, 2)).mean(axis=1).astype(samples.dtype)
+
+        frame_rate = clipped_audio.frame_rate
+        if frame_rate <= 0:
+            print(f"Warning: Invalid frame rate ({frame_rate}) detected for clipped audio.")
+            frame_rate = audio.frame_rate
+
+        if samples.size == 0:
+            print(f"Warning: Clipped audio resulted in empty samples array ({start_second}s to {end_second}s).")
+            return None
+
+        return (frame_rate, samples)
+    except FileNotFoundError:
+        print(f"Error: Audio file not found at path: {audio_path}")
+        return None
+    except Exception as e:
+        print(f"Error clipping audio {audio_path} from {start_second}s to {end_second}s: {e}")
+        return None
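
+# Main transcription callback. Runs on GPU via @spaces.GPU and must return one value per
+# wired output: segment rows, raw times, word rows, the audio path, and five download buttons.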
 @spaces.GPU
+def get_transcripts_and_raw_times(audio_path, session_dir):
     if not audio_path:
+        gr.Error("No audio file path provided for transcription.", duration=None)
+        # one hidden-button update per download-button output
+        return [], [], [], None, *([gr.DownloadButton(visible=False)] * 5)
+
+    vis_data = [["N/A", "N/A", "Processing failed"]]
+    raw_times_data = [[0.0, 0.0]]
+    char_vis_data = []
+    processed_audio_path = None
+    original_path_name = Path(audio_path).name
+    audio_name = Path(audio_path).stem
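+
+    # Normalize the input to 16 kHz mono WAV before transcription; the model
+    # expects 16 kHz single-channel audio.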
+    try:
+        try:
+            gr.Info(f"Loading audio: {original_path_name}", duration=2)
+            audio = AudioSegment.from_file(audio_path)
+            duration_sec = audio.duration_seconds
+        except Exception as load_e:
+            gr.Error(f"Failed to load audio file {original_path_name}: {load_e}", duration=None)
+            return [["Error", "Error", "Load failed"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+
+        resampled = False
+        mono = False
+        target_sr = 16000
+
+        if audio.frame_rate != target_sr:
+            try:
+                audio = audio.set_frame_rate(target_sr)
+                resampled = True
+            except Exception as resample_e:
+                gr.Error(f"Failed to resample audio: {resample_e}", duration=None)
+                return [["Error", "Error", "Resample failed"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+
+        if audio.channels == 2:
+            try:
+                audio = audio.set_channels(1)
+                mono = True
+            except Exception as mono_e:
+                gr.Error(f"Failed to convert audio to mono: {mono_e}", duration=None)
+                return [["Error", "Error", "Mono conversion failed"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+        elif audio.channels > 2:
+            gr.Error(f"Audio has {audio.channels} channels. Only mono (1) or stereo (2) supported.", duration=None)
+            return [["Error", "Error", f"{audio.channels}-channel audio not supported"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+
+        if resampled or mono:
+            try:
+                processed_audio_path = Path(session_dir, f"{audio_name}_resampled.wav")
+                audio.export(processed_audio_path, format="wav")
+                transcribe_path = processed_audio_path.as_posix()
+                info_path_name = f"{original_path_name} (processed)"
+            except Exception as export_e:
+                gr.Error(f"Failed to export processed audio: {export_e}", duration=None)
+                if processed_audio_path and os.path.exists(processed_audio_path):
+                    os.remove(processed_audio_path)
+                return [["Error", "Error", "Export failed"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+        else:
+            transcribe_path = audio_path
+            info_path_name = original_path_name
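+
+        # For audio longer than 8 minutes, switch the encoder to local attention and
+        # chunked subsampling so it fits in GPU memory; reverted in the finally block.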
+        long_audio_settings_applied = False
+        try:
+            model.to(device)
+            model.to(torch.float32)
+            gr.Info(f"Transcribing {info_path_name} on {device}...", duration=2)
+
+            if duration_sec > 480:
+                try:
+                    gr.Info("Audio longer than 8 minutes. Applying optimized settings for long transcription.", duration=3)
+                    print("Applying long audio settings: Local Attention and Chunking.")
+                    model.change_attention_model("rel_pos_local_attn", [256, 256])
+                    model.change_subsampling_conv_chunking_factor(1)
+                    long_audio_settings_applied = True
+                except Exception as setting_e:
+                    gr.Warning(f"Could not apply long audio settings: {setting_e}", duration=5)
+                    print(f"Warning: Failed to apply long audio settings: {setting_e}")
+            model.to(torch.bfloat16)
+            output = model.transcribe([transcribe_path], timestamps=True)
+
+            if not output or not isinstance(output, list) or not output[0] or not hasattr(output[0], 'timestamp') or not output[0].timestamp or 'segment' not in output[0].timestamp:
+                gr.Error("Transcription failed or produced unexpected output format.", duration=None)
+                return [["Error", "Error", "Transcription Format Issue"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+
+            segment_timestamps = output[0].timestamp['segment']
+            csv_headers = ["Start (s)", "End (s)", "Segment"]
+            vis_data = [[f"{ts['start']:.2f}", f"{ts['end']:.2f}", ts['segment']] for ts in segment_timestamps]
+            raw_times_data = [[ts['start'], ts['end']] for ts in segment_timestamps]
+
+            char_timestamps_raw = output[0].timestamp.get("char", [])
+            if not isinstance(char_timestamps_raw, list):
+                print(f"Warning: char_timestamps_raw is not a list, but {type(char_timestamps_raw)}. Defaulting to empty.")
+                char_timestamps_raw = []
+            char_vis_data = [
+                [f"{c['start']:.2f}", f"{c['end']:.2f}", c["char"]]
+                for c in char_timestamps_raw if isinstance(c, dict) and 'start' in c and 'end' in c and 'char' in c
+            ]

+            word_timestamps_raw = output[0].timestamp.get("word", [])
+            word_vis_data = [
+                [f"{w['start']:.2f}", f"{w['end']:.2f}", w["word"]]
+                for w in word_timestamps_raw if isinstance(w, dict) and 'start' in w and 'end' in w and 'word' in w
+            ]
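
+            # Write CSV/SRT/VTT/JSON/LRC transcripts into the session directory so the
+            # download buttons can serve them.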
+            button_update = gr.DownloadButton(visible=False)
+            srt_file_path = None
+            vtt_file_path = None
+            json_file_path = None
+            lrc_file_path = None
+            try:
+                csv_file_path = Path(session_dir, f"transcription_{audio_name}.csv")
+                with open(csv_file_path, 'w', newline='', encoding='utf-8') as f:
+                    writer = csv.writer(f)
+                    writer.writerow(csv_headers)
+                    writer.writerows(vis_data)
+                print(f"CSV transcript saved to temporary file: {csv_file_path}")
+                button_update = gr.DownloadButton(value=csv_file_path.as_posix(), visible=True)

+                srt_file_path = Path(session_dir, f"transcription_{audio_name}.srt")
+                vtt_file_path = Path(session_dir, f"transcription_{audio_name}.vtt")
+                json_file_path = Path(session_dir, f"transcription_{audio_name}.json")
+                write_srt(vis_data, srt_file_path)
+                write_vtt(vis_data, word_vis_data, vtt_file_path)
+                write_json(vis_data, word_vis_data, json_file_path)
+                print(f"SRT, VTT, JSON transcript saved to temporary files: {srt_file_path}, {vtt_file_path}, {json_file_path}")

+                lrc_file_path = Path(session_dir, f"transcription_{audio_name}.lrc")
+                write_lrc(vis_data, lrc_file_path)
+                print(f"LRC transcript saved to temporary file: {lrc_file_path}")
+            except Exception as csv_e:
+                gr.Error(f"Failed to create transcript files: {csv_e}", duration=None)
+                print(f"Error writing transcript files: {csv_e}")

+            gr.Info("Transcription complete.", duration=2)
+            return (
+                vis_data,
+                raw_times_data,
+                word_vis_data,
+                audio_path,
+                gr.DownloadButton(value=csv_file_path.as_posix(), visible=True),
+                gr.DownloadButton(value=srt_file_path.as_posix(), visible=True),
+                gr.DownloadButton(value=vtt_file_path.as_posix(), visible=True),
+                gr.DownloadButton(value=json_file_path.as_posix(), visible=True),
+                gr.DownloadButton(value=lrc_file_path.as_posix(), visible=True)
+            )

+        except torch.cuda.OutOfMemoryError as e:
+            error_msg = 'CUDA out of memory. Please try a shorter audio or reduce GPU load.'
+            print(f"CUDA OutOfMemoryError: {e}")
+            gr.Error(error_msg, duration=None)
+            return [["OOM", "OOM", error_msg]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)

+        except FileNotFoundError:
+            error_msg = f"Audio file for transcription not found: {Path(transcribe_path).name}."
+            print(f"Error: Transcribe audio file not found at path: {transcribe_path}")
+            gr.Error(error_msg, duration=None)
+            return [["Error", "Error", "File not found for transcription"]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)

+        except Exception as e:
+            error_msg = f"Transcription failed: {e}"
+            print(f"Error during transcription processing: {e}")
+            gr.Error(error_msg, duration=None)
+            return [["Error", "Error", error_msg]], [[0.0, 0.0]], [], audio_path, *([gr.DownloadButton(visible=False)] * 5)
+        finally:
+            try:
+                if long_audio_settings_applied:
+                    try:
+                        print("Reverting long audio settings.")
+                        model.change_attention_model("rel_pos")
+                        model.change_subsampling_conv_chunking_factor(-1)
+                    except Exception as revert_e:
+                        print(f"Warning: Failed to revert long audio settings: {revert_e}")
+                        gr.Warning(f"Issue reverting model settings after long transcription: {revert_e}", duration=5)

+                # 'model' is module-level, so check globals(), not locals()
+                if 'model' in globals() and hasattr(model, 'cpu'):
+                    if device == 'cuda':
+                        model.cpu()
+                gc.collect()
+                if device == 'cuda':
+                    torch.cuda.empty_cache()
+            except Exception as cleanup_e:
+                print(f"Error during model cleanup: {cleanup_e}")
+                gr.Warning(f"Issue during model cleanup: {cleanup_e}", duration=5)
+    finally:
+        if processed_audio_path and os.path.exists(processed_audio_path):
+            try:
+                os.remove(processed_audio_path)
+                print(f"Temporary audio file {processed_audio_path} removed.")
+            except Exception as e:
+                print(f"Error removing temporary audio file {processed_audio_path}: {e}")
+def play_segment(evt: gr.SelectData, raw_ts_list, current_audio_path):
+    if not isinstance(raw_ts_list, list):
+        print(f"Warning: raw_ts_list is not a list ({type(raw_ts_list)}). Cannot play segment.")
+        return gr.Audio(value=None, label="Selected Segment")
+
+    if not current_audio_path:
+        print("No audio path available to play segment from.")
+        return gr.Audio(value=None, label="Selected Segment")
+
+    selected_index = evt.index[0]
+
+    if selected_index < 0 or selected_index >= len(raw_ts_list):
+        print(f"Invalid index {selected_index} selected for list of length {len(raw_ts_list)}.")
+        return gr.Audio(value=None, label="Selected Segment")
+
+    if not isinstance(raw_ts_list[selected_index], (list, tuple)) or len(raw_ts_list[selected_index]) != 2:
+        print(f"Warning: Data at index {selected_index} is not in the expected format [start, end].")
+        return gr.Audio(value=None, label="Selected Segment")
+
+    start_time_s, end_time_s = raw_ts_list[selected_index]
+    print(f"Attempting to play segment: {current_audio_path} from {start_time_s:.2f}s to {end_time_s:.2f}s")
+    segment_data = get_audio_segment(current_audio_path, start_time_s, end_time_s)
+
+    if segment_data:
+        print("Segment data retrieved successfully.")
+        return gr.Audio(value=segment_data, autoplay=True, label=f"Segment: {start_time_s:.2f}s - {end_time_s:.2f}s", interactive=False)
+    else:
+        print("Failed to get audio segment data.")
+        return gr.Audio(value=None, label="Selected Segment")
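+
+# Subtitle/lyric writers. They consume the display rows built above: [start, end, text]
+# with start/end formatted as strings, hence the float() conversions.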
+def write_srt(segments, path):
+    def sec2srt(t):
+        h, rem = divmod(int(float(t)), 3600)
+        m, s = divmod(rem, 60)
+        ms = int((float(t) - int(float(t))) * 1000)
+        return f"{h:02}:{m:02}:{s:02},{ms:03}"
+    with open(path, "w", encoding="utf-8") as f:
+        for i, seg in enumerate(segments, 1):
+            f.write(f"{i}\n{sec2srt(seg[0])} --> {sec2srt(seg[1])}\n{seg[2]}\n\n")
+
+def write_vtt(segments, words, path):
+    def sec2vtt(t):
+        h, rem = divmod(int(float(t)), 3600)
+        m, s = divmod(rem, 60)
+        ms = int((float(t) - int(float(t))) * 1000)
+        return f"{h:02}:{m:02}:{s:02}.{ms:03}"
+
+    with open(path, "w", encoding="utf-8") as f:
+        f.write("WEBVTT\n\n")
+
+        word_idx = 0
+        for seg in segments:
+            s_start = float(seg[0])
+            s_end = float(seg[1])
+            s_text = seg[2]
+
+            # Collect the words that fall inside this segment
+            segment_words = []
+            while word_idx < len(words):
+                w = words[word_idx]
+                w_start = float(w[0])
+                w_end = float(w[1])
+                if w_start >= s_start and w_end <= s_end:
+                    segment_words.append(w)
+                    word_idx += 1
+                elif w_end < s_start:
+                    word_idx += 1
+                else:
+                    break
+
+            # Emit one cue per word
+            for i, w in enumerate(segment_words):
+                w_start = float(w[0])
+                w_end = float(w[1])
+                w_text = w[2]
+
+                # Highlight the current word; render the others normally
+                colored_text = ""
+                for j, other_w in enumerate(segment_words):
+                    if j == i:
+                        colored_text += f"<c.yellow><b>{other_w[2]}</b></c> "
+                    else:
+                        colored_text += f"{other_w[2]} "
+
+                f.write(f"{sec2vtt(w_start)} --> {sec2vtt(w_end)}\n{colored_text.strip()}\n\n")
+def write_json(segments, words, path):
+    result = {"segments": []}
+    word_idx = 0
+    for s in segments:
+        s_start = float(s[0])
+        s_end = float(s[1])
+        s_text = s[2]
+        word_list = []
+        while word_idx < len(words):
+            w = words[word_idx]
+            w_start = float(w[0])
+            w_end = float(w[1])
+            if w_start >= s_start and w_end <= s_end:
+                word_list.append({"start": w_start, "end": w_end, "word": w[2]})
+                word_idx += 1
+            elif w_end < s_start:
+                word_idx += 1
+            else:
+                break
+        result["segments"].append({
+            "start": s_start,
+            "end": s_end,
+            "text": s_text,
+            "words": word_list
+        })
+    with open(path, "w", encoding="utf-8") as f:
+        json.dump(result, f, ensure_ascii=False, indent=2)
+
+def write_lrc(segments, path):
+    def sec2lrc(t):
+        m, s = divmod(float(t), 60)
+        return f"[{int(m):02}:{s:05.2f}]"
+    with open(path, "w", encoding="utf-8") as f:
+        for seg in segments:
+            f.write(f"{sec2lrc(seg[0])}{seg[2]}\n")
+
+article = (
+    "<p style='font-size: 1.1em;'>"
+    "This demo showcases <code><a href='https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2'>parakeet-tdt-0.6b-v2</a></code>, a 600-million-parameter model designed for high-quality English speech recognition."
+    "</p>"
+    "<p><strong style='color: red; font-size: 1.2em;'>Key Features:</strong></p>"
+    "<ul style='font-size: 1.1em;'>"
+    "  <li>Automatic punctuation and capitalization</li>"
+    "  <li>Accurate word-level timestamps (click on a segment in the table below to play it!)</li>"
+    "  <li>Per-word timestamps are listed in the 'Word View' tab.</li>"
+    "  <li>Efficiently transcribes long audio segments (<strong>updated to support up to 3 hours</strong>) <small>(For even longer audio, see <a href='https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py' target='_blank'>this script</a>)</small></li>"
+    "  <li>Robust performance on spoken numbers and song-lyrics transcription</li>"
+    "</ul>"
+    "<p style='font-size: 1.1em;'>"
+    "This model is <strong>available for commercial and non-commercial use</strong>."
+    "</p>"
+    "<p style='text-align: center;'>"
+    "<a href='https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2' target='_blank'>🎙️ Learn more about the Model</a> | "
+    "<a href='https://arxiv.org/abs/2305.05084' target='_blank'>📄 Fast Conformer paper</a> | "
+    "<a href='https://arxiv.org/abs/2304.06795' target='_blank'>📚 TDT paper</a> | "
+    "<a href='https://github.com/NVIDIA/NeMo' target='_blank'>🧑‍💻 NeMo Repository</a>"
+    "</p>"
+)
+
+examples = [
+    ["data/example-yt_saTD1u8PorI.mp3"],
+]
+
+nvidia_theme = gr_themes.Default(
+    primary_hue=gr_themes.Color(
+        c50="#E6F1D9", c100="#CEE3B3", c200="#B5D58C", c300="#9CC766",
+        c400="#84B940", c500="#76B900", c600="#68A600", c700="#5A9200",
+        c800="#4C7E00", c900="#3E6A00", c950="#2F5600"
+    ),
+    neutral_hue="gray",
+    font=[gr_themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
+).set()
+
+with gr.Blocks(theme=nvidia_theme) as demo:
+    model_display_name = MODEL_NAME.split('/')[-1] if '/' in MODEL_NAME else MODEL_NAME
+    gr.Markdown(f"<h1 style='text-align: center; margin: 0 auto;'>Speech Transcription with {model_display_name}</h1>")
+    gr.HTML(article)
+
+    current_audio_path_state = gr.State(None)
+    raw_timestamps_list_state = gr.State([])
+    session_dir_state = gr.State()
+    demo.load(start_session, outputs=[session_dir_state])
+
+    with gr.Tabs():
+        with gr.TabItem("Audio File"):
+            file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File")
+            gr.Examples(examples=examples, inputs=[file_input], label="Example Audio Files (Click to Load)")
+            file_transcribe_btn = gr.Button("Transcribe Uploaded File", variant="primary")
+
+        with gr.TabItem("Microphone"):
+            mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
+            mic_transcribe_btn = gr.Button("Transcribe Microphone Input", variant="primary")
+
+    gr.Markdown("---")
+    gr.Markdown("<p><strong style='color: #FF0000; font-size: 1.2em;'>Transcription Results</strong></p>")
+
+    download_btn = gr.DownloadButton(label="Download Segment Transcript (CSV)", visible=False)
+    srt_btn = gr.DownloadButton(label="Download SRT", visible=False)
+    vtt_btn = gr.DownloadButton(label="Download VTT", visible=False)
+    json_btn = gr.DownloadButton(label="Download JSON", visible=False)
+    lrc_btn = gr.DownloadButton(label="Download LRC", visible=False)

     with gr.Tabs():
+        with gr.TabItem("Segment View (Click row to play segment)"):
+            vis_timestamps_df = gr.DataFrame(
+                headers=["Start (s)", "End (s)", "Segment"],
+                datatype=["number", "number", "str"],
+                wrap=True,
+            )
+            selected_segment_player = gr.Audio(label="Selected Segment", interactive=False)
+
+        with gr.TabItem("Word View"):
+            word_vis_df = gr.DataFrame(
+                headers=["Start (s)", "End (s)", "Word"],
+                datatype=["number", "number", "str"],
+                wrap=False,
+            )
+
+    mic_transcribe_btn.click(
+        fn=get_transcripts_and_raw_times,
+        inputs=[mic_input, session_dir_state],
+        outputs=[vis_timestamps_df, raw_timestamps_list_state, word_vis_df, current_audio_path_state, download_btn, srt_btn, vtt_btn, json_btn, lrc_btn],
+        api_name="transcribe_mic"
+    )
+
+    file_transcribe_btn.click(
+        fn=get_transcripts_and_raw_times,
+        inputs=[file_input, session_dir_state],
+        outputs=[vis_timestamps_df, raw_timestamps_list_state, word_vis_df, current_audio_path_state, download_btn, srt_btn, vtt_btn, json_btn, lrc_btn],
+        api_name="transcribe_file"
+    )
+
+    vis_timestamps_df.select(
+        fn=play_segment,
+        inputs=[raw_timestamps_list_state, current_audio_path_state],
+        outputs=[selected_segment_player],
     )

     demo.unload(end_session)

+if __name__ == "__main__":
+    print("Launching Gradio Demo...")
+    demo.queue()
+    demo.launch()