import torch
import torch.nn.functional as F
import numpy as np
import json
import os
import multiprocessing as mp
from datasets import load_dataset
from snac import SNAC
from tqdm import tqdm
from collections import defaultdict
import logging
import traceback
import time
import queue
import torchaudio


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


SNAC_SAMPLE_RATE = 24000  # the 24 kHz SNAC model expects 24 kHz mono audio
OUTPUT_DIR = "processed_emilia"
ROWS_PER_SAVE = 1000  # currently unused; results are written once per batch
ROWS_PER_PUSH = 10000000  # per-worker row cap before a worker stops
NUM_WORKERS = 64
BATCH_SIZE = 1000
STOP_AFTER = None  # optional cap on batches per worker (None = no cap)
NUM_GPUS = torch.cuda.device_count()


STAGES = [
    "Initializing CUDA (Starting)",
    "Initializing CUDA (Finished)",
    "Loading SNAC model (Starting)",
    "Loading SNAC model (Finished)",
    "Loading dataset (Starting)",
    "Loading dataset (Finished)",
    "Resolving data files (Starting)",
    "Resolving data files (Finished)",
    "Preparing batch (Starting)",
    "Preparing batch (Finished)",
    "Encoding audio (Starting)",
    "Encoding audio (Finished)",
    "Post-processing (Starting)",
    "Post-processing (Finished)",
    "Saving results (Starting)",
    "Saving results (Finished)",
    "Completed",
    "Error",
]


def chunk_and_pad_audio(audio, chunk_size):
    """Right-pad audio with zeros to a multiple of chunk_size and split it into chunks."""
    length = audio.shape[-1]
    padded_length = ((length + chunk_size - 1) // chunk_size) * chunk_size
    padded_audio = F.pad(audio, (0, padded_length - length), mode="constant", value=0)
    # (..., length) -> (..., num_chunks, chunk_size)
    batched_audio = padded_audio.unfold(-1, size=chunk_size, step=chunk_size)
    return batched_audio


def generate_snac_encoding(audio, model):
    """Encode one audio sample into SNAC codebook tokens.

    `audio` is a datasets audio dict with "array" and "sampling_rate"; returns a dict
    mapping "snac_{i}" to the flattened token list of the i-th codebook.
    """
    device = next(model.parameters()).device
    waveform = torch.tensor(audio["array"]).float().to(device)

    # Resample to the 24 kHz rate the SNAC model expects.
    if audio["sampling_rate"] != SNAC_SAMPLE_RATE:
        resampler = torchaudio.transforms.Resample(
            orig_freq=audio["sampling_rate"], new_freq=SNAC_SAMPLE_RATE
        ).to(device)
        waveform = resampler(waveform)

    # Collapse to mono and ensure a (1, samples) shape.
    if waveform.dim() == 2:
        waveform = waveform.mean(dim=0, keepdim=True)
    elif waveform.dim() == 1:
        waveform = waveform.unsqueeze(0)

    # Chunk into ~1 s pieces, rounded up to a multiple of
    # hop_length * lcm(vq stride, attention window) so each chunk encodes cleanly.
    num_second = 1
    chunk_size_initial = num_second * SNAC_SAMPLE_RATE
    lcm = np.lcm.reduce([model.vq_strides[0], model.attn_window_size or 1])
    pad_to = model.hop_length * lcm
    chunk_size = int(np.ceil(chunk_size_initial / pad_to) * pad_to)
    audio = chunk_and_pad_audio(waveform, chunk_size)
    audio = audio.permute(1, 0, 2)  # (channels, chunks, samples) -> (chunks, channels, samples)

    codes_list = []
    with torch.no_grad():
        for chunk in audio:
            codes = model.encode(chunk.unsqueeze(0))
            codes = [c.cpu() for c in codes]
            codes_list.append(codes)

    # Concatenate the per-chunk codes codebook by codebook, then flatten to plain lists.
    codes_list = [torch.cat(codes, dim=0) for codes in zip(*codes_list)]
    codes_list = [code.reshape(-1).tolist() for code in codes_list]

    snac_dict = {f"snac_{i}": codes for i, codes in enumerate(codes_list)}
    return snac_dict


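# Minimal single-sample sketch (assumes a CUDA device and one streamed Emilia item):
#   model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval().cuda()
#   item = next(iter(load_dataset("amphion/Emilia-Dataset", streaming=True)["train"]))
#   tokens = generate_snac_encoding(item["mp3"], model)
#   # -> {"snac_0": [...], "snac_1": [...], ...}

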
def process_audio_batch(batch, model):
    """Encode a batch of dataset items, skipping any item that fails."""
    results = []
    for item in batch:
        try:
            snac_tokens = generate_snac_encoding(item["mp3"], model)
            if not snac_tokens:
                raise ValueError("Generated SNAC tokens are empty")

            results.append({
                "__key__": item["__key__"],
                "__url__": item["__url__"],
                "json": item["json"],
                "path": item["mp3"]["path"],
                **snac_tokens,
            })
        except Exception as e:
            logging.error(f"Error while encoding item {item.get('__key__', '<unknown>')}: {str(e)}")
    return results


def save_to_jsonl(data, file_path):
    """Append each result dict to file_path as one JSON object per line."""
    with open(file_path, "a") as f:
        for item in data:
            json.dump(item, f)
            f.write("\n")


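# Reading a worker's output back (hypothetical downstream use; paths follow the
# layout created in process_shard below):
#   with open("processed_emilia/worker_0/processed_worker_0.jsonl") as f:
#       rows = [json.loads(line) for line in f]
#   # each row carries __key__, __url__, json, path, and the snac_{i} token lists

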
def process_shard(worker_id, status_queue, progress_queue):
    """Worker process: stream the dataset, encode this worker's shard, and write JSONL output."""
    try:
        status_queue.put((worker_id, "Initializing CUDA (Starting)"))
        gpu_id = worker_id % NUM_GPUS
        device = torch.device(f"cuda:{gpu_id}")
        status_queue.put((worker_id, "Initializing CUDA (Finished)"))

        status_queue.put((worker_id, "Loading SNAC model (Starting)"))
        model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval().to(device)
        status_queue.put((worker_id, "Loading SNAC model (Finished)"))

        status_queue.put((worker_id, "Loading dataset (Starting)"))
        dataset = load_dataset("amphion/Emilia-Dataset", streaming=True)
        status_queue.put((worker_id, "Loading dataset (Finished)"))

        status_queue.put((worker_id, "Resolving data files (Starting)"))
        # Each worker streams the full dataset and keeps every NUM_WORKERS-th item as its shard.
        shard_iter = (
            item for i, item in enumerate(dataset["train"]) if i % NUM_WORKERS == worker_id
        )
        first_item = next(shard_iter)
        status_queue.put((worker_id, "Resolving data files (Finished)"))

        worker_output_dir = os.path.join(OUTPUT_DIR, f"worker_{worker_id}")
        os.makedirs(worker_output_dir, exist_ok=True)
        output_file = os.path.join(
            worker_output_dir, f"processed_worker_{worker_id}.jsonl"
        )

        batch = [first_item]
        total_processed = 0

        while True:
            try:
                item = next(shard_iter)
                batch.append(item)

                if len(batch) == BATCH_SIZE:
                    status_queue.put((worker_id, "Preparing batch (Starting)"))
                    results = process_audio_batch(batch, model)
                    status_queue.put((worker_id, "Preparing batch (Finished)"))

                    status_queue.put((worker_id, "Saving results (Starting)"))
                    save_to_jsonl(results, output_file)
                    status_queue.put((worker_id, "Saving results (Finished)"))
                    total_processed += len(results)
                    progress_queue.put(len(results))
                    batch = []

                    if total_processed >= ROWS_PER_PUSH:
                        break

                    if STOP_AFTER is not None and total_processed // BATCH_SIZE >= STOP_AFTER:
                        break
            except StopIteration:
                break

        # Flush any remaining items that did not fill a full batch.
        if batch:
            results = process_audio_batch(batch, model)
            save_to_jsonl(results, output_file)
            total_processed += len(results)
            progress_queue.put(len(results))

        status_queue.put((worker_id, "Completed"))

    except Exception as e:
        logging.error(
            f"Worker {worker_id} encountered an error: {str(e)}\n{traceback.format_exc()}"
        )
        status_queue.put((worker_id, "Error"))


def main():
    """Spawn worker processes, track their stages with tqdm bars, and report final status."""
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    ctx = mp.get_context('spawn')
    status_queue = ctx.Queue()
    progress_queue = ctx.Queue()

    print(f"Initializing {NUM_WORKERS} workers across {NUM_GPUS} GPUs...")

    processes = [
        ctx.Process(target=process_shard, args=(i, status_queue, progress_queue))
        for i in range(NUM_WORKERS)
    ]
    for p in processes:
        p.start()

    # One progress bar per stage, counting how many workers are currently in it.
    stage_counts = {
        stage: tqdm(total=NUM_WORKERS, desc=f"{stage:<30}", position=i, leave=True)
        for i, stage in enumerate(STAGES)
    }

    total_rows = (NUM_WORKERS * BATCH_SIZE * STOP_AFTER) if STOP_AFTER else ROWS_PER_PUSH
    overall_progress = tqdm(
        total=total_rows, desc="Overall Progress", position=len(STAGES), leave=True
    )

    # Track each worker's last reported stage; None until its first status message arrives.
    worker_stages = defaultdict(lambda: None)

    while any(p.is_alive() for p in processes):
        try:
            worker_id, status = status_queue.get(timeout=0.1)
            old_stage = worker_stages[worker_id]
            worker_stages[worker_id] = status

            if old_stage != status:
                if old_stage is not None and old_stage not in ("Completed", "Error"):
                    stage_counts[old_stage].update(-1)
                stage_counts[status].update(1)
        except queue.Empty:
            pass

        try:
            progress = progress_queue.get(timeout=0.1)
            overall_progress.update(progress)
        except queue.Empty:
            pass

    for p in processes:
        p.join()

    # Drain any messages that arrived after the last worker exited, so the final
    # per-worker status counts and the overall progress bar stay accurate.
    while True:
        try:
            worker_id, status = status_queue.get_nowait()
            worker_stages[worker_id] = status
        except queue.Empty:
            break
    while True:
        try:
            overall_progress.update(progress_queue.get_nowait())
        except queue.Empty:
            break

    for bar in stage_counts.values():
        bar.close()
    overall_progress.close()

    print("All workers finished processing.")

    completed_workers = sum(1 for stage in worker_stages.values() if stage == "Completed")
    error_workers = sum(1 for stage in worker_stages.values() if stage == "Error")
    print(f"Completed workers: {completed_workers}")
    print(f"Workers with errors: {error_workers}")


if __name__ == "__main__":
    main()
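

# Merging the per-worker shards afterwards (hypothetical; not done by this script):
#   cat processed_emilia/worker_*/processed_worker_*.jsonl > processed_emilia/all.jsonl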