import gradio as gr
import whisper
import torch
import os
from pydub import AudioSegment, silence
from faster_whisper import WhisperModel # Import faster-whisper
import numpy as np
from scipy.io import wavfile
# Mapping of model names to Whisper model sizes
MODELS = {
"Tiny (Fastest)": "tiny",
"Base (Faster)": "base",
"Small (Balanced)": "small",
"Medium (Accurate)": "medium",
"Large (Most Accurate)": "large",
"Faster Whisper Large v3": "Systran/faster-whisper-large-v3" # Renamed and set as default
}
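# Note: the "Faster Whisper Large v3" entry is loaded with faster_whisper.WhisperModel,
# while every other entry is loaded through whisper.load_model using its short size name.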
# Mapping of full language names to language codes
LANGUAGE_NAME_TO_CODE = {
"Auto Detect": "Auto Detect",
"English": "en",
"Chinese": "zh",
"German": "de",
"Spanish": "es",
"Russian": "ru",
"Korean": "ko",
"French": "fr",
"Japanese": "ja",
"Portuguese": "pt",
"Turkish": "tr",
"Polish": "pl",
"Catalan": "ca",
"Dutch": "nl",
"Arabic": "ar",
"Swedish": "sv",
"Italian": "it",
"Indonesian": "id",
"Hindi": "hi",
"Finnish": "fi",
"Vietnamese": "vi",
"Hebrew": "he",
"Ukrainian": "uk",
"Greek": "el",
"Malay": "ms",
"Czech": "cs",
"Romanian": "ro",
"Danish": "da",
"Hungarian": "hu",
"Tamil": "ta",
"Norwegian": "no",
"Thai": "th",
"Urdu": "ur",
"Croatian": "hr",
"Bulgarian": "bg",
"Lithuanian": "lt",
"Latin": "la",
"Maori": "mi",
"Malayalam": "ml",
"Welsh": "cy",
"Slovak": "sk",
"Telugu": "te",
"Persian": "fa",
"Latvian": "lv",
"Bengali": "bn",
"Serbian": "sr",
"Azerbaijani": "az",
"Slovenian": "sl",
"Kannada": "kn",
"Estonian": "et",
"Macedonian": "mk",
"Breton": "br",
"Basque": "eu",
"Icelandic": "is",
"Armenian": "hy",
"Nepali": "ne",
"Mongolian": "mn",
"Bosnian": "bs",
"Kazakh": "kk",
"Albanian": "sq",
"Swahili": "sw",
"Galician": "gl",
"Marathi": "mr",
"Punjabi": "pa",
"Sinhala": "si", # Sinhala support
"Khmer": "km",
"Shona": "sn",
"Yoruba": "yo",
"Somali": "so",
"Afrikaans": "af",
"Occitan": "oc",
"Georgian": "ka",
"Belarusian": "be",
"Tajik": "tg",
"Sindhi": "sd",
"Gujarati": "gu",
"Amharic": "am",
"Yiddish": "yi",
"Lao": "lo",
"Uzbek": "uz",
"Faroese": "fo",
"Haitian Creole": "ht",
"Pashto": "ps",
"Turkmen": "tk",
"Nynorsk": "nn",
"Maltese": "mt",
"Sanskrit": "sa",
"Luxembourgish": "lb",
"Burmese": "my",
"Tibetan": "bo",
"Tagalog": "tl",
"Malagasy": "mg",
"Assamese": "as",
"Tatar": "tt",
"Hawaiian": "haw",
"Lingala": "ln",
"Hausa": "ha",
"Bashkir": "ba",
"Javanese": "jw",
"Sundanese": "su",
}
# Reverse mapping of language codes to full language names
CODE_TO_LANGUAGE_NAME = {v: k for k, v in LANGUAGE_NAME_TO_CODE.items()}
def detect_language(audio_file):
"""Detect the language of the audio file."""
# Define device and compute type for faster-whisper
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float32" if device == "cuda" else "int8"
# Load the faster-whisper model for language detection
model = WhisperModel(MODELS["Faster Whisper Large v3"], device=device, compute_type=compute_type)
# Convert audio to 16kHz mono for better compatibility
audio = AudioSegment.from_file(audio_file)
audio = audio.set_frame_rate(16000).set_channels(1)
processed_audio_path = "processed_audio.wav"
audio.export(processed_audio_path, format="wav")
    # Run faster-whisper once just to detect the language; the lazily evaluated
    # segments generator is never consumed, only the detection info is used.
    _, info = model.transcribe(processed_audio_path, task="transcribe", language=None)
    detected_language_code = info.language
# Get the full language name from the code
detected_language = CODE_TO_LANGUAGE_NAME.get(detected_language_code, "Unknown Language")
# Clean up processed audio file
os.remove(processed_audio_path)
return f"Detected Language: {detected_language}"
def remove_silence(audio_file, silence_threshold=-40, min_silence_len=500):
"""
    Remove silence from the audio file using pydub's dB-threshold silence detection.
Args:
audio_file (str): Path to the input audio file.
silence_threshold (int): Silence threshold in dB. Default is -40 dB.
min_silence_len (int): Minimum length of silence to remove in milliseconds. Default is 500 ms.
Returns:
str: Path to the output audio file with silence removed.
"""
# Load the audio file
audio = AudioSegment.from_file(audio_file)
# Detect silent chunks
silent_chunks = silence.detect_silence(
audio,
min_silence_len=min_silence_len,
silence_thresh=silence_threshold
)
# Remove silent chunks
non_silent_audio = AudioSegment.empty()
start = 0
for chunk in silent_chunks:
non_silent_audio += audio[start:chunk[0]] # Add non-silent part
start = chunk[1] # Move to the end of the silent chunk
non_silent_audio += audio[start:] # Add the remaining part
# Export the processed audio
output_path = "silence_removed_audio.wav"
non_silent_audio.export(output_path, format="wav")
return output_path
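# Illustrative call to remove_silence with a stricter gate (sketch only; the path is a
# placeholder). A lower (more negative) silence_threshold treats only very quiet passages
# as silence, and a larger min_silence_len preserves short natural pauses:
#   cleaned = remove_silence("podcast.wav", silence_threshold=-50, min_silence_len=1000)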
def convert_to_wav(audio_file):
"""
Convert the input audio file to WAV format.
Args:
audio_file (str): Path to the input audio file.
Returns:
str: Path to the converted WAV file.
"""
audio = AudioSegment.from_file(audio_file)
wav_path = "converted_audio.wav"
audio.export(wav_path, format="wav")
return wav_path
def detect_voice_activity(audio_file, threshold=0.02):
"""
Detect voice activity in the audio file and trim the audio to include only voice segments.
Args:
audio_file (str): Path to the input audio file.
threshold (float): Amplitude threshold for voice detection. Default is 0.02.
Returns:
str: Path to the output audio file with only voice segments.
"""
# Convert the input audio to WAV format
wav_path = convert_to_wav(audio_file)
# Load the WAV file
sample_rate, data = wavfile.read(wav_path)
    # Normalize integer PCM samples to the range [-1, 1] first, before the dtype
    # information is lost by the channel-averaging step below
    if np.issubdtype(data.dtype, np.integer):
        data = data.astype(np.float32) / np.iinfo(data.dtype).max
    else:
        data = data.astype(np.float32)
    # If the audio is stereo, convert it to mono by averaging the channels
    if len(data.shape) > 1:
        data = np.mean(data, axis=1)
# Detect voice activity
voice_segments = []
is_voice = False
start = 0
for i, sample in enumerate(data):
if abs(sample) > threshold and not is_voice:
is_voice = True
start = i
elif abs(sample) <= threshold and is_voice:
is_voice = False
voice_segments.append((start, i))
# If the last segment is voice, add it
if is_voice:
voice_segments.append((start, len(data)))
# Trim the audio to include only voice segments
trimmed_audio = np.array([], dtype=np.float32)
for segment in voice_segments:
trimmed_audio = np.concatenate((trimmed_audio, data[segment[0]:segment[1]]))
# Convert the trimmed audio back to 16-bit integer format
trimmed_audio_int16 = np.int16(trimmed_audio * 32767)
# Export the trimmed audio
output_path = "voice_trimmed_audio.wav"
wavfile.write(output_path, sample_rate, trimmed_audio_int16)
# Clean up the converted WAV file
os.remove(wav_path)
return output_path
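# Note: detect_voice_activity is not wired into the Gradio UI below; the
# "Voice Detection and Trimming" tab uses detect_and_trim_audio, which applies the same
# per-sample amplitude gate but also returns (start, end) timestamps in seconds.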
def detect_and_trim_audio(audio_file, threshold=0.02):
"""
Detect voice activity in the audio file, trim the audio to include only voice segments,
and return the timestamps of the detected segments.
Args:
audio_file (str): Path to the input audio file.
threshold (float): Amplitude threshold for voice detection. Default is 0.02.
Returns:
str: Path to the output audio file with only voice segments.
list: List of timestamps (start, end) for the detected segments.
"""
# Convert the input audio to WAV format
wav_path = convert_to_wav(audio_file)
# Load the WAV file
sample_rate, data = wavfile.read(wav_path)
    # Normalize integer PCM samples to the range [-1, 1] first, before the dtype
    # information is lost by the channel-averaging step below
    if np.issubdtype(data.dtype, np.integer):
        data = data.astype(np.float32) / np.iinfo(data.dtype).max
    else:
        data = data.astype(np.float32)
    # If the audio is stereo, convert it to mono by averaging the channels
    if len(data.shape) > 1:
        data = np.mean(data, axis=1)
# Detect voice activity
voice_segments = []
is_voice = False
start = 0
for i, sample in enumerate(data):
if abs(sample) > threshold and not is_voice:
is_voice = True
start = i
elif abs(sample) <= threshold and is_voice:
is_voice = False
voice_segments.append((start, i))
# If the last segment is voice, add it
if is_voice:
voice_segments.append((start, len(data)))
# Trim the audio to include only voice segments
trimmed_audio = np.array([], dtype=np.float32)
for segment in voice_segments:
trimmed_audio = np.concatenate((trimmed_audio, data[segment[0]:segment[1]]))
# Convert the trimmed audio back to 16-bit integer format
trimmed_audio_int16 = np.int16(trimmed_audio * 32767)
# Export the trimmed audio
output_path = "voice_trimmed_audio.wav"
wavfile.write(output_path, sample_rate, trimmed_audio_int16)
# Calculate timestamps in seconds
timestamps = [(start / sample_rate, end / sample_rate) for start, end in voice_segments]
# Clean up the converted WAV file
os.remove(wav_path)
return output_path, timestamps
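# Illustrative usage of detect_and_trim_audio (sketch only; "interview.mp3" is a placeholder):
#   trimmed_path, stamps = detect_and_trim_audio("interview.mp3", threshold=0.03)
#   # stamps is a list of (start_seconds, end_seconds) tuples, one per detected voice segment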
def transcribe_audio(audio_file, language="Auto Detect", model_size="Faster Whisper Large v3"):
"""Transcribe the audio file."""
# Convert audio to 16kHz mono for better compatibility
audio = AudioSegment.from_file(audio_file)
audio = audio.set_frame_rate(16000).set_channels(1)
processed_audio_path = "processed_audio.wav"
audio.export(processed_audio_path, format="wav")
# Load the appropriate model
    if model_size == "Faster Whisper Large v3":
        # Define device and compute type for faster-whisper
        device = "cuda" if torch.cuda.is_available() else "cpu"
        compute_type = "float32" if device == "cuda" else "int8"
        # Use faster-whisper for the Systran model
        model = WhisperModel(MODELS[model_size], device=device, compute_type=compute_type)
        # Respect the language selected in the UI; None lets faster-whisper auto-detect
        language_code = None if language == "Auto Detect" else LANGUAGE_NAME_TO_CODE.get(language)
        segments, info = model.transcribe(
            processed_audio_path,
            task="transcribe",
            language=language_code,
            word_timestamps=True,
            repetition_penalty=1.1,
            temperature=[0.0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0],
        )
        # segment.text usually carries a leading space, so strip each piece before joining
        transcription = " ".join(segment.text.strip() for segment in segments)
        detected_language_code = info.language
        detected_language = CODE_TO_LANGUAGE_NAME.get(detected_language_code, "Unknown Language")
else:
# Use the standard Whisper model
model = whisper.load_model(MODELS[model_size])
# Transcribe the audio
if language == "Auto Detect":
result = model.transcribe(processed_audio_path, fp16=False) # Auto-detect language
detected_language_code = result.get("language", "unknown")
detected_language = CODE_TO_LANGUAGE_NAME.get(detected_language_code, "Unknown Language")
else:
language_code = LANGUAGE_NAME_TO_CODE.get(language, "en") # Default to English if not found
result = model.transcribe(processed_audio_path, language=language_code, fp16=False)
detected_language = language
transcription = result["text"]
# Clean up processed audio file
os.remove(processed_audio_path)
# Return transcription and detected language
return f"Detected Language: {detected_language}\n\nTranscription:\n{transcription}"
# Define the Gradio interface
with gr.Blocks() as demo:
gr.Markdown("# Audio Transcription and Language Detection")
with gr.Tab("Detect Language"):
gr.Markdown("Upload an audio file to detect its language.")
detect_audio_input = gr.Audio(type="filepath", label="Upload Audio File")
detect_language_output = gr.Textbox(label="Detected Language")
detect_button = gr.Button("Detect Language")
with gr.Tab("Transcribe Audio"):
gr.Markdown("Upload an audio file, select a language (or choose 'Auto Detect'), and choose a model for transcription.")
transcribe_audio_input = gr.Audio(type="filepath", label="Upload Audio File")
language_dropdown = gr.Dropdown(
choices=list(LANGUAGE_NAME_TO_CODE.keys()), # Full language names
label="Select Language",
value="Auto Detect"
)
model_dropdown = gr.Dropdown(
choices=list(MODELS.keys()), # Model options
label="Select Model",
value="Faster Whisper Large v3", # Default to "Faster Whisper Large v3"
interactive=True # Allow model selection by default
)
transcribe_output = gr.Textbox(label="Transcription and Detected Language")
transcribe_button = gr.Button("Transcribe Audio")
with gr.Tab("Remove Silence"):
gr.Markdown("Upload an audio file to remove silence.")
silence_audio_input = gr.Audio(type="filepath", label="Upload Audio File")
silence_threshold_slider = gr.Slider(
minimum=-60, maximum=-20, value=-40, step=1,
label="Silence Threshold (dB)",
info="Lower values detect quieter sounds as silence."
)
min_silence_len_slider = gr.Slider(
minimum=100, maximum=2000, value=500, step=100,
label="Minimum Silence Length (ms)",
info="Minimum duration of silence to remove."
)
silence_output = gr.Audio(label="Processed Audio (Silence Removed)", type="filepath")
silence_button = gr.Button("Remove Silence")
with gr.Tab("Voice Detection and Trimming"):
gr.Markdown("Upload an audio file to detect voice activity and trim the audio.")
voice_audio_input = gr.Audio(type="filepath", label="Upload Audio File")
voice_threshold_slider = gr.Slider(
minimum=0.01, maximum=0.1, value=0.02, step=0.01,
label="Voice Detection Threshold",
info="Higher values detect louder sounds as voice."
)
voice_output = gr.Audio(label="Trimmed Audio", type="filepath")
timestamps_output = gr.Textbox(label="Detected Timestamps (seconds)")
voice_button = gr.Button("Detect and Trim Voice")
# Link buttons to functions
detect_button.click(detect_language, inputs=detect_audio_input, outputs=detect_language_output)
transcribe_button.click(
transcribe_audio,
inputs=[transcribe_audio_input, language_dropdown, model_dropdown],
outputs=transcribe_output
)
silence_button.click(
remove_silence,
inputs=[silence_audio_input, silence_threshold_slider, min_silence_len_slider],
outputs=silence_output
)
voice_button.click(
detect_and_trim_audio,
inputs=[voice_audio_input, voice_threshold_slider],
outputs=[voice_output, timestamps_output]
)
# Launch the Gradio interface
demo.launch() |