from nemo.collections.asr.models import ASRModel
import torch
import gradio as gr
import spaces
import gc
import shutil
from pathlib import Path
from pydub import AudioSegment
import numpy as np
import os
import gradio.themes as gr_themes
import csv
import json
from typing import List
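
# Select the compute device; on a ZeroGPU Space the GPU is attached only
# while the @spaces.GPU-decorated handler below is running.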
device = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"
model = ASRModel.from_pretrained(model_name=MODEL_NAME)
model.eval()
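
# Per-session scratch space: each browser session gets its own directory
# under /tmp, created when the page loads and removed when it unloads.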
def start_session(request: gr.Request):
session_hash = request.session_hash
session_dir = Path(f'/tmp/{session_hash}')
session_dir.mkdir(parents=True, exist_ok=True)
print(f"Session with hash {session_hash} started.")
return session_dir.as_posix()
def end_session(request: gr.Request):
session_hash = request.session_hash
session_dir = Path(f'/tmp/{session_hash}')
if session_dir.exists():
shutil.rmtree(session_dir)
print(f"Session with hash {session_hash} ended.")
def get_audio_segment(audio_path, start_second, end_second):
if not audio_path or not Path(audio_path).exists():
print(f"Warning: Audio path '{audio_path}' not found or invalid for clipping.")
return None
try:
start_ms = int(start_second * 1000)
end_ms = int(end_second * 1000)
start_ms = max(0, start_ms)
if end_ms <= start_ms:
print(f"Warning: End time ({end_second}s) is not after start time ({start_second}s). Adjusting end time.")
end_ms = start_ms + 100
audio = AudioSegment.from_file(audio_path)
clipped_audio = audio[start_ms:end_ms]
samples = np.array(clipped_audio.get_array_of_samples())
if clipped_audio.channels == 2:
samples = samples.reshape((-1, 2)).mean(axis=1).astype(samples.dtype)
frame_rate = clipped_audio.frame_rate
if frame_rate <= 0:
print(f"Warning: Invalid frame rate ({frame_rate}) detected for clipped audio.")
frame_rate = audio.frame_rate
if samples.size == 0:
print(f"Warning: Clipped audio resulted in empty samples array ({start_second}s to {end_second}s).")
return None
return (frame_rate, samples)
except FileNotFoundError:
print(f"Error: Audio file not found at path: {audio_path}")
return None
except Exception as e:
print(f"Error clipping audio {audio_path} from {start_second}s to {end_second}s: {e}")
return None
def preprocess_audio(audio_path, session_dir):
"""
オーディオファイルの前処理(リサンプリング、モノラル変換)を行う。
Args:
audio_path (str): 入力オーディオファイルのパス。
session_dir (str): セッションディレクトリのパス。
Returns:
tuple: (processed_path, info_path_name, duration_sec) のタプル、または None(処理に失敗した場合)。
"""
try:
original_path_name = Path(audio_path).name
audio_name = Path(audio_path).stem
try:
gr.Info(f"Loading audio: {original_path_name}", duration=2)
audio = AudioSegment.from_file(audio_path)
duration_sec = audio.duration_seconds
except Exception as load_e:
gr.Error(f"Failed to load audio file {original_path_name}: {load_e}", duration=None)
return None, None, None
resampled = False
mono = False
target_sr = 16000
if audio.frame_rate != target_sr:
try:
audio = audio.set_frame_rate(target_sr)
resampled = True
except Exception as resample_e:
gr.Error(f"Failed to resample audio: {resample_e}", duration=None)
return None, None, None
if audio.channels == 2:
try:
audio = audio.set_channels(1)
mono = True
except Exception as mono_e:
gr.Error(f"Failed to convert audio to mono: {mono_e}", duration=None)
return None, None, None
elif audio.channels > 2:
gr.Error(f"Audio has {audio.channels} channels. Only mono (1) or stereo (2) supported.", duration=None)
return None, None, None
processed_audio_path = None
if resampled or mono:
try:
processed_audio_path = Path(session_dir, f"{audio_name}_resampled.wav")
audio.export(processed_audio_path, format="wav")
transcribe_path = processed_audio_path.as_posix()
info_path_name = f"{original_path_name} (processed)"
except Exception as export_e:
gr.Error(f"Failed to export processed audio: {export_e}", duration=None)
if processed_audio_path and os.path.exists(processed_audio_path):
os.remove(processed_audio_path)
return None, None, None
else:
transcribe_path = audio_path
info_path_name = original_path_name
return transcribe_path, info_path_name, duration_sec
except Exception as e:
gr.Error(f"Audio preprocessing failed: {e}", duration=None)
return None, None, None
def transcribe_audio(transcribe_path, model, duration_sec, device):
"""
オーディオファイルを文字起こしし、タイムスタンプを取得する。
Args:
transcribe_path (str): 入力オーディオファイルのパス。
model (ASRModel): 使用するASRモデル。
duration_sec (float): オーディオファイルの長さ(秒)。
device (str): 使用するデバイス('cuda' or 'cpu')。
Returns:
tuple: (vis_data, raw_times_data, word_vis_data) のタプル、または None(処理に失敗した場合)。
"""
long_audio_settings_applied = False
try:
        # Clear CUDA memory before use to reduce fragmentation
if device == 'cuda':
torch.cuda.empty_cache()
gc.collect()
model.to(device)
model.to(torch.float32)
        # Log memory usage (for debugging)
if device == 'cuda':
print(f"CUDA Memory before transcription: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
gr.Info(f"Transcribing on {device}...", duration=2)
if duration_sec > 480:
try:
gr.Info("Audio longer than 8 minutes. Applying optimized settings for long transcription.", duration=3)
print("Applying long audio settings: Local Attention and Chunking.")
model.change_attention_model("rel_pos_local_attn", [256,256])
model.change_subsampling_conv_chunking_factor(1)
long_audio_settings_applied = True
except Exception as setting_e:
gr.Warning(f"Could not apply long audio settings: {setting_e}", duration=5)
print(f"Warning: Failed to apply long audio settings: {setting_e}")
model.to(torch.bfloat16)
output = model.transcribe([transcribe_path], timestamps=True)
if not output or not isinstance(output, list) or not output[0] or not hasattr(output[0], 'timestamp') or not output[0].timestamp or 'segment' not in output[0].timestamp:
gr.Error("Transcription failed or produced unexpected output format.", duration=None)
return None, None, None
segment_timestamps = output[0].timestamp['segment']
vis_data = [[f"{ts['start']:.2f}", f"{ts['end']:.2f}", ts['segment']] for ts in segment_timestamps]
raw_times_data = [[ts['start'], ts['end']] for ts in segment_timestamps]
word_timestamps_raw = output[0].timestamp.get("word", [])
word_vis_data = [
[f"{w['start']:.2f}", f"{w['end']:.2f}", w["word"]]
for w in word_timestamps_raw if isinstance(w, dict) and 'start' in w and 'end' in w and 'word' in w
]
gr.Info("Transcription complete.", duration=2)
return vis_data, raw_times_data, word_vis_data
except torch.cuda.OutOfMemoryError as e:
error_msg = 'CUDA out of memory. Please try a shorter audio or reduce GPU load.'
print(f"CUDA OutOfMemoryError: {e}")
gr.Error(error_msg, duration=None)
return None, None, None
except Exception as e:
error_msg = f"Transcription failed: {e}"
print(f"Error during transcription processing: {e}")
gr.Error(error_msg, duration=None)
return None, None, None
finally:
try:
if long_audio_settings_applied:
try:
print("Reverting long audio settings.")
model.change_attention_model("rel_pos")
model.change_subsampling_conv_chunking_factor(-1)
except Exception as revert_e:
print(f"Warning: Failed to revert long audio settings: {revert_e}")
gr.Warning(f"Issue reverting model settings after long transcription: {revert_e}", duration=5)
if device == 'cuda':
model.cpu()
gc.collect()
if device == 'cuda':
torch.cuda.empty_cache()
except Exception as cleanup_e:
print(f"Error during model cleanup: {cleanup_e}")
gr.Warning(f"Issue during model cleanup: {cleanup_e}", duration=5)
def save_transcripts(session_dir, audio_name, vis_data, word_vis_data):
"""
文字起こし結果を各種ファイル形式(CSV、SRT、VTT、JSON、LRC)で保存する。
Args:
session_dir (str): セッションディレクトリのパス。
audio_name (str): オーディオファイルの名前。
vis_data (list): 表示用の文字起こし結果のリスト。
word_vis_data (list): 単語レベルのタイムスタンプのリスト。
Returns:
tuple: 各ファイルのダウンロードボタンの更新情報を含むタプル。
"""
try:
csv_headers = ["Start (s)", "End (s)", "Segment"]
csv_file_path = Path(session_dir, f"transcription_{audio_name}.csv")
with open(csv_file_path, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(csv_headers)
writer.writerows(vis_data)
print(f"CSV transcript saved to temporary file: {csv_file_path}")
srt_file_path = Path(session_dir, f"transcription_{audio_name}.srt")
vtt_file_path = Path(session_dir, f"transcription_{audio_name}.vtt")
json_file_path = Path(session_dir, f"transcription_{audio_name}.json")
write_srt(vis_data, srt_file_path)
write_vtt(vis_data, word_vis_data, vtt_file_path)
write_json(vis_data, word_vis_data, json_file_path)
print(f"SRT, VTT, JSON transcript saved to temporary files: {srt_file_path}, {vtt_file_path}, {json_file_path}")
lrc_file_path = Path(session_dir, f"transcription_{audio_name}.lrc")
write_lrc(vis_data, lrc_file_path)
print(f"LRC transcript saved to temporary file: {lrc_file_path}")
return (
gr.DownloadButton(value=csv_file_path.as_posix(), visible=True),
gr.DownloadButton(value=srt_file_path.as_posix(), visible=True),
gr.DownloadButton(value=vtt_file_path.as_posix(), visible=True),
gr.DownloadButton(value=json_file_path.as_posix(), visible=True),
gr.DownloadButton(value=lrc_file_path.as_posix(), visible=True)
)
except Exception as e:
gr.Error(f"Failed to create transcript files: {e}", duration=None)
print(f"Error writing transcript files: {e}")
return tuple([gr.DownloadButton(visible=False)] * 5)
def split_audio_with_overlap(audio_path: str, session_dir: str, chunk_length_sec: int = 3600, overlap_sec: int = 30) -> List[str]:
"""
音声ファイルをchunk_length_secごとにoverlap_secのオーバーラップ付きで分割し、
分割ファイルのパスリストを返す。
"""
audio = AudioSegment.from_file(audio_path)
duration = audio.duration_seconds
chunk_paths = []
start = 0
chunk_idx = 0
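    # Walk the audio in chunk_length_sec steps, padding every chunk except
    # the first with overlap_sec of leading context and every chunk except
    # the last with overlap_sec of trailing context. With the defaults, a
    # 2.5-hour file yields chunks covering 0:00-60:30, 59:30-120:30, and
    # 119:30-150:00.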
while start < duration:
end = min(start + chunk_length_sec, duration)
        # Pad the chunk boundaries with the overlap
chunk_start = max(0, start - (overlap_sec if start > 0 else 0))
chunk_end = min(end + (overlap_sec if end < duration else 0), duration)
chunk = audio[chunk_start * 1000:chunk_end * 1000]
chunk_path = Path(session_dir, f"chunk_{chunk_idx:03d}.wav").as_posix()
chunk.export(chunk_path, format="wav")
chunk_paths.append(chunk_path)
start += chunk_length_sec
chunk_idx += 1
return chunk_paths
@spaces.GPU
def get_transcripts_and_raw_times(audio_path, session_dir):
"""
オーディオファイルを処理し、文字起こし結果を生成する。
3時間を超える場合は60分ごとに分割し、オーバーラップ付きでASRを実行してマージする。
"""
if not audio_path:
gr.Error("No audio file path provided for transcription.", duration=None)
return [], [], [], None, gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False)
audio_name = Path(audio_path).stem
processed_audio_path = None
temp_chunk_paths = []
try:
        # Preprocess the audio
transcribe_path, info_path_name, duration_sec = preprocess_audio(audio_path, session_dir)
if not transcribe_path or not duration_sec:
return [], [], [], audio_path, gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False)
processed_audio_path = transcribe_path if transcribe_path != audio_path else None
        # Audio over 3 hours: split and run ASR chunk by chunk
if duration_sec > 10800:
chunk_paths = split_audio_with_overlap(transcribe_path, session_dir, chunk_length_sec=3600, overlap_sec=30)
temp_chunk_paths = chunk_paths.copy()
all_vis_data = []
all_raw_times_data = []
all_word_vis_data = []
offset = 0.0
prev_end = 0.0
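            # offset converts chunk-relative timestamps to absolute times;
            # prev_end marks the last emitted segment end so the overlapped
            # region is not duplicated in the merged output.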
for i, chunk_path in enumerate(chunk_paths):
chunk_audio = AudioSegment.from_file(chunk_path)
chunk_duration = chunk_audio.duration_seconds
                # Run ASR on this chunk
result = transcribe_audio(chunk_path, model, chunk_duration, device)
if not result:
continue
vis_data, raw_times_data, word_vis_data = result
                # Shift timestamps by this chunk's absolute offset
vis_data_offset = []
raw_times_data_offset = []
word_vis_data_offset = []
for row in vis_data:
s, e, seg = float(row[0]), float(row[1]), row[2]
vis_data_offset.append([f"{s+offset:.2f}", f"{e+offset:.2f}", seg])
for row in raw_times_data:
s, e = float(row[0]), float(row[1])
raw_times_data_offset.append([s+offset, e+offset])
for row in word_vis_data:
s, e, w = float(row[0]), float(row[1]), row[2]
word_vis_data_offset.append([f"{s+offset:.2f}", f"{e+offset:.2f}", w])
                # De-duplicate the overlap region (simply keep only rows starting at or after the previous end)
vis_data_offset = [row for row in vis_data_offset if float(row[0]) >= prev_end]
raw_times_data_offset = [row for row in raw_times_data_offset if row[0] >= prev_end]
word_vis_data_offset = [row for row in word_vis_data_offset if float(row[0]) >= prev_end]
if vis_data_offset:
prev_end = float(vis_data_offset[-1][1])
all_vis_data.extend(vis_data_offset)
all_raw_times_data.extend(raw_times_data_offset)
all_word_vis_data.extend(word_vis_data_offset)
                # The next chunk's file starts 30 s (the overlap) before its
                # nominal 60-minute boundary, so its absolute start time is
                # (i + 1) * 3600 - 30. Accumulating chunk_duration here would
                # drift by the overlap at every boundary.
                offset = (i + 1) * 3600 - 30
            # Save transcript files
button_updates = save_transcripts(session_dir, audio_name, all_vis_data, all_word_vis_data)
            # Remove temporary chunk files
for p in temp_chunk_paths:
try:
os.remove(p)
except Exception:
pass
return (
all_vis_data,
all_raw_times_data,
all_word_vis_data,
audio_path,
*button_updates
)
else:
            # 3 hours or less: transcribe in a single pass
result = transcribe_audio(transcribe_path, model, duration_sec, device)
if not result:
return [], [], [], audio_path, gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False), gr.DownloadButton(visible=False)
vis_data, raw_times_data, word_vis_data = result
button_updates = save_transcripts(session_dir, audio_name, vis_data, word_vis_data)
return (
vis_data,
raw_times_data,
word_vis_data,
audio_path,
*button_updates
)
finally:
if processed_audio_path and os.path.exists(processed_audio_path):
try:
os.remove(processed_audio_path)
print(f"Temporary audio file {processed_audio_path} removed.")
except Exception as e:
print(f"Error removing temporary audio file {processed_audio_path}: {e}")
        # Clean up chunk files
for p in temp_chunk_paths:
if os.path.exists(p):
try:
os.remove(p)
except Exception:
pass
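
# Row-select handler for the segment table: look up the selected row's
# [start, end] pair and return that clip for playback.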
def play_segment(evt: gr.SelectData, raw_ts_list, current_audio_path):
if not isinstance(raw_ts_list, list):
print(f"Warning: raw_ts_list is not a list ({type(raw_ts_list)}). Cannot play segment.")
return gr.Audio(value=None, label="Selected Segment")
if not current_audio_path:
print("No audio path available to play segment from.")
return gr.Audio(value=None, label="Selected Segment")
selected_index = evt.index[0]
if selected_index < 0 or selected_index >= len(raw_ts_list):
print(f"Invalid index {selected_index} selected for list of length {len(raw_ts_list)}.")
return gr.Audio(value=None, label="Selected Segment")
if not isinstance(raw_ts_list[selected_index], (list, tuple)) or len(raw_ts_list[selected_index]) != 2:
print(f"Warning: Data at index {selected_index} is not in the expected format [start, end].")
return gr.Audio(value=None, label="Selected Segment")
start_time_s, end_time_s = raw_ts_list[selected_index]
print(f"Attempting to play segment: {current_audio_path} from {start_time_s:.2f}s to {end_time_s:.2f}s")
segment_data = get_audio_segment(current_audio_path, start_time_s, end_time_s)
if segment_data:
print("Segment data retrieved successfully.")
return gr.Audio(value=segment_data, autoplay=True, label=f"Segment: {start_time_s:.2f}s - {end_time_s:.2f}s", interactive=False)
else:
print("Failed to get audio segment data.")
return gr.Audio(value=None, label="Selected Segment")
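
# Write a standard SRT subtitle file: sequential cue index, an
# HH:MM:SS,mmm --> HH:MM:SS,mmm time range, then the segment text.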
def write_srt(segments, path):
def sec2srt(t):
h, rem = divmod(int(float(t)), 3600)
m, s = divmod(rem, 60)
ms = int((float(t) - int(float(t))) * 1000)
return f"{h:02}:{m:02}:{s:02},{ms:03}"
with open(path, "w", encoding="utf-8") as f:
for i, seg in enumerate(segments, 1):
f.write(f"{i}\n{sec2srt(seg[0])} --> {sec2srt(seg[1])}\n{seg[2]}\n\n")
def write_vtt(segments, words, path):
def sec2vtt(t):
h, rem = divmod(int(float(t)), 3600)
m, s = divmod(rem, 60)
ms = int((float(t) - int(float(t))) * 1000)
return f"{h:02}:{m:02}:{s:02}.{ms:03}"
with open(path, "w", encoding="utf-8") as f:
f.write("WEBVTT\n\n")
word_idx = 0
for seg in segments:
s_start = float(seg[0])
s_end = float(seg[1])
s_text = seg[2]
            # Collect the words that fall inside this segment
segment_words = []
while word_idx < len(words):
w = words[word_idx]
w_start = float(w[0])
w_end = float(w[1])
if w_start >= s_start and w_end <= s_end:
segment_words.append(w)
word_idx += 1
elif w_end < s_start:
word_idx += 1
else:
break
            # Emit one timestamped cue per word
for i, w in enumerate(segment_words):
w_start = float(w[0])
w_end = float(w[1])
w_text = w[2]
                # Highlight the current word; render the others normally
colored_text = ""
for j, other_w in enumerate(segment_words):
if j == i:
colored_text += f"<c.yellow><b>{other_w[2]}</b></c> "
else:
colored_text += f"{other_w[2]} "
f.write(f"{sec2vtt(w_start)} --> {sec2vtt(w_end)}\n{colored_text.strip()}\n\n")
def write_json(segments, words, path):
result = {"segments": []}
word_idx = 0
for s in segments:
s_start = float(s[0])
s_end = float(s[1])
s_text = s[2]
word_list = []
while word_idx < len(words):
w = words[word_idx]
w_start = float(w[0])
w_end = float(w[1])
if w_start >= s_start and w_end <= s_end:
word_list.append({"start": w_start, "end": w_end, "word": w[2]})
word_idx += 1
elif w_end < s_start:
word_idx += 1
else:
break
result["segments"].append({
"start": s_start,
"end": s_end,
"text": s_text,
"words": word_list
})
with open(path, "w", encoding="utf-8") as f:
json.dump(result, f, ensure_ascii=False, indent=2)
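
# Write an LRC (synchronized lyrics) file: one [mm:ss.xx] tag per segment.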
def write_lrc(segments, path):
def sec2lrc(t):
m, s = divmod(float(t), 60)
return f"[{int(m):02}:{s:05.2f}]"
with open(path, "w", encoding="utf-8") as f:
for seg in segments:
f.write(f"{sec2lrc(seg[0])}{seg[2]}\n")
article = (
"<p style='font-size: 1.1em;'>"
"This demo showcases <code><a href='https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2'>parakeet-tdt-0.6b-v2</a></code>, a 600M-parameter model for high-quality English ASR.<br>"
"<em>Now optimised for long recordings (hours) with automatic chunking & memory control.</em>"
"</p>"
"<p><strong style='color: red; font-size: 1.2em;'>Key Features:</strong></p>"
"<ul style='font-size: 1.1em;'>"
" <li>Automatic punctuation and capitalization</li>"
" <li>Accurate word-level timestamps (click on a segment in the table below to play it!)</li>"
" <li>Character-level timestamps now available in the 'Character View' tab.</li>"
" <li>Efficiently transcribes long audio segments (<strong>updated to support upto 3 hours</strong>) <small>(For even longer audios, see <a href='https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py' target='_blank'>this script</a>)</small></li>"
" <li>Robust performance on spoken numbers, and song lyrics transcription </li>"
"</ul>"
"<p style='font-size: 1.1em;'>"
"This model is <strong>available for commercial and non-commercial use</strong>."
"</p>"
"<p style='text-align: center;'>"
"<a href='https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2' target='_blank'>🎙️ Learn more about the Model</a> | "
"<a href='https://arxiv.org/abs/2305.05084' target='_blank'>📄 Fast Conformer paper</a> | "
"<a href='https://arxiv.org/abs/2304.06795' target='_blank'>📚 TDT paper</a> | "
"<a href='https://github.com/NVIDIA/NeMo' target='_blank'>🧑💻 NeMo Repository</a>"
"</p>"
)
examples = [
["data/example-yt_saTD1u8PorI.mp3"],
]
nvidia_theme = gr_themes.Default(
primary_hue=gr_themes.Color(
c50="#E6F1D9", c100="#CEE3B3", c200="#B5D58C", c300="#9CC766",
c400="#84B940", c500="#76B900", c600="#68A600", c700="#5A9200",
c800="#4C7E00", c900="#3E6A00", c950="#2F5600"
),
neutral_hue="gray",
font=[gr_themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
).set()
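
# Assemble the Gradio UI: file/microphone inputs, segment and word tables,
# a segment player, and per-format download buttons.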
with gr.Blocks(theme=nvidia_theme) as demo:
model_display_name = MODEL_NAME.split('/')[-1] if '/' in MODEL_NAME else MODEL_NAME
gr.Markdown(f"<h1 style='text-align: center; margin: 0 auto;'>Speech Transcription with {model_display_name} <span style='font-size:0.6em;'>(Long-audio ready)</span></h1>")
gr.HTML(article)
current_audio_path_state = gr.State(None)
raw_timestamps_list_state = gr.State([])
session_dir_state = gr.State()
demo.load(start_session, outputs=[session_dir_state])
with gr.Tabs():
with gr.TabItem("Audio File"):
file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File")
gr.Examples(examples=examples, inputs=[file_input], label="Example Audio Files (Click to Load)")
file_transcribe_btn = gr.Button("Transcribe Uploaded File", variant="primary")
with gr.TabItem("Microphone"):
mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
mic_transcribe_btn = gr.Button("Transcribe Microphone Input", variant="primary")
gr.Markdown("---")
gr.Markdown("<p><strong style='color: #FF0000; font-size: 1.2em;'>Transcription Results</strong></p>")
download_btn = gr.DownloadButton(label="Download Segment Transcript (CSV)", visible=False)
srt_btn = gr.DownloadButton(label="Download SRT", visible=False)
vtt_btn = gr.DownloadButton(label="Download VTT", visible=False)
json_btn = gr.DownloadButton(label="Download JSON", visible=False)
lrc_btn = gr.DownloadButton(label="Download LRC", visible=False)
with gr.Tabs():
with gr.TabItem("Segment View (Click row to play segment)"):
vis_timestamps_df = gr.DataFrame(
headers=["Start (s)", "End (s)", "Segment"],
datatype=["number", "number", "str"],
wrap=True,
)
selected_segment_player = gr.Audio(label="Selected Segment", interactive=False)
with gr.TabItem("Word View"):
word_vis_df = gr.DataFrame(
headers=["Start (s)", "End (s)", "Word"],
datatype=["number", "number", "str"],
wrap=False,
)
mic_transcribe_btn.click(
fn=get_transcripts_and_raw_times,
inputs=[mic_input, session_dir_state],
outputs=[vis_timestamps_df, raw_timestamps_list_state, word_vis_df, current_audio_path_state, download_btn, srt_btn, vtt_btn, json_btn, lrc_btn],
api_name="transcribe_mic"
)
file_transcribe_btn.click(
fn=get_transcripts_and_raw_times,
inputs=[file_input, session_dir_state],
outputs=[vis_timestamps_df, raw_timestamps_list_state, word_vis_df, current_audio_path_state, download_btn, srt_btn, vtt_btn, json_btn, lrc_btn],
api_name="transcribe_file"
)
vis_timestamps_df.select(
fn=play_segment,
inputs=[raw_timestamps_list_state, current_audio_path_state],
outputs=[selected_segment_player],
)
demo.unload(end_session)
if __name__ == "__main__":
print("Launching Gradio Demo...")
    # Limit queue size and concurrency to guard against timeouts
demo.queue(
max_size=5,
default_concurrency_limit=1
)
demo.launch() |