Upload tes13.py with huggingface_hub
tes13.py
ADDED
@@ -0,0 +1,261 @@
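# tes13.py -- encode the amphion/Emilia-Dataset with the SNAC neural codec.
#
# Spawns NUM_WORKERS processes (spread round-robin over the available GPUs),
# streams the dataset, encodes each clip with hubertsiuzdak/snac_24khz, and
# appends the resulting token rows to per-worker JSONL files under
# processed_emilia/. Intended to be run directly: python tes13.py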
import torch
import torch.nn.functional as F
import numpy as np
import json
import os
import multiprocessing as mp
from datasets import load_dataset
from snac import SNAC
from tqdm import tqdm
from collections import defaultdict
import logging
import traceback
import time
import queue
import torchaudio

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Constants
SNAC_SAMPLE_RATE = 24000
OUTPUT_DIR = "processed_emilia"
ROWS_PER_SAVE = 1000
ROWS_PER_PUSH = 10000000
NUM_WORKERS = 64
BATCH_SIZE = 1000
STOP_AFTER = None
NUM_GPUS = torch.cuda.device_count()

# Worker stages
STAGES = [
    "Initializing CUDA (Starting)",
    "Initializing CUDA (Finished)",
    "Loading SNAC model (Starting)",
    "Loading SNAC model (Finished)",
    "Loading dataset (Starting)",
    "Loading dataset (Finished)",
    "Resolving data files (Starting)",
    "Resolving data files (Finished)",
    "Preparing batch (Starting)",
    "Preparing batch (Finished)",
    "Encoding audio (Starting)",
    "Encoding audio (Finished)",
    "Post-processing (Starting)",
    "Post-processing (Finished)",
    "Saving results (Starting)",
    "Saving results (Finished)",
    "Completed",
    "Error"
]
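# Stage labels reported by the workers over the status queue; main() renders
# one tqdm bar per stage so you can see where each worker currently is.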


def chunk_and_pad_audio(audio, chunk_size):
    length = audio.shape[-1]
    padded_length = ((length + chunk_size - 1) // chunk_size) * chunk_size
    padded_audio = F.pad(audio, (0, padded_length - length), mode="constant", value=0)
    batched_audio = padded_audio.unfold(-1, size=chunk_size, step=chunk_size)
    return batched_audio
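# Note: for mono input of shape (1, n_samples), the unfold above yields
# (1, n_chunks, chunk_size); generate_snac_encoding later permutes this to
# (n_chunks, 1, chunk_size) so each roughly one-second chunk is encoded on its own.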


def generate_snac_encoding(audio, model):
    device = next(model.parameters()).device
    waveform = torch.tensor(audio["array"]).float().to(device)
    if audio["sampling_rate"] != SNAC_SAMPLE_RATE:
        resampler = torchaudio.transforms.Resample(
            orig_freq=audio["sampling_rate"], new_freq=SNAC_SAMPLE_RATE
        ).to(device)
        waveform = resampler(waveform)
    if waveform.dim() == 2:
        waveform = waveform.mean(dim=0, keepdim=True)
    elif waveform.dim() == 1:
        waveform = waveform.unsqueeze(0)

    num_second = 1
    chunk_size_initial = num_second * SNAC_SAMPLE_RATE
    lcm = np.lcm.reduce([model.vq_strides[0], model.attn_window_size or 1])
    pad_to = model.hop_length * lcm
    chunk_size = int(np.ceil(chunk_size_initial / pad_to) * pad_to)
    audio = chunk_and_pad_audio(waveform, chunk_size)
    audio = audio.permute(1, 0, 2)

    codes_list = []
    with torch.no_grad():
        for chunk in audio:
            codes = model.encode(chunk.unsqueeze(0))
            codes = [c.cpu() for c in codes]
            codes_list.append(codes)

    codes_list = [torch.cat(level_codes, dim=0) for level_codes in zip(*codes_list)]
    codes_list = [code.reshape(-1).cpu().tolist() for code in codes_list]
    # Create a dictionary with keys "snac_0", "snac_1", etc.
    snac_dict = {f"snac_{i}": codes for i, codes in enumerate(codes_list)}
    return snac_dict
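# SNAC returns one code tensor per codebook level (typically three for the
# 24 kHz checkpoint used here), so the dict produced above usually has keys
# snac_0, snac_1, snac_2, each holding a flat Python list of token ids.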


def process_audio_batch(batch, model):
    results = []
    for item in batch:
        try:
            snac_tokens = generate_snac_encoding(item['mp3'], model)
            if not snac_tokens:
                raise ValueError("Generated SNAC tokens are empty")

            results.append({
                "__key__": item["__key__"],
                "__url__": item["__url__"],
                "json": item['json'],
                "path": item['mp3']["path"],
                **snac_tokens  # Add the snac tokens dictionary
            })
        except Exception as e:
            logging.error(f"Error during post-processing: {str(e)}")
    return results
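# Items that fail to encode are logged and skipped rather than retried, so a
# saved batch can contain fewer rows than BATCH_SIZE.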


def save_to_jsonl(data, file_path):
    with open(file_path, "a") as f:
        for item in data:
            json.dump(item, f)
            f.write("\n")
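# Illustrative only (not part of the pipeline): each worker's output can be
# read back with plain json, e.g.
#   with open("processed_emilia/worker_0/processed_worker_0.jsonl") as f:
#       rows = [json.loads(line) for line in f]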


def process_shard(worker_id, status_queue, progress_queue):
    try:
        status_queue.put((worker_id, "Initializing CUDA (Starting)"))
        gpu_id = worker_id % NUM_GPUS
        device = torch.device(f"cuda:{gpu_id}")
        status_queue.put((worker_id, "Initializing CUDA (Finished)"))

        status_queue.put((worker_id, "Loading SNAC model (Starting)"))
        model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval().to(device)
        status_queue.put((worker_id, "Loading SNAC model (Finished)"))

        status_queue.put((worker_id, "Loading dataset (Starting)"))
        dataset = load_dataset("amphion/Emilia-Dataset", streaming=True)
        status_queue.put((worker_id, "Loading dataset (Finished)"))

        status_queue.put((worker_id, "Resolving data files (Starting)"))
        shard_iter = (
            item for i, item in enumerate(dataset["train"]) if i % NUM_WORKERS == worker_id
        )
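        # Round-robin sharding: every worker iterates the same streaming dataset
        # and keeps only every NUM_WORKERS-th item, so the stream is walked once
        # per worker. Pulling the first item eagerly below forces the data files
        # to resolve before the main loop starts.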
        first_item = next(shard_iter)
        status_queue.put((worker_id, "Resolving data files (Finished)"))

        worker_output_dir = os.path.join(OUTPUT_DIR, f"worker_{worker_id}")
        os.makedirs(worker_output_dir, exist_ok=True)
        output_file = os.path.join(
            worker_output_dir, f"processed_worker_{worker_id}.jsonl"
        )

        batch = [first_item]
        total_processed = 0

        while True:
            try:
                item = next(shard_iter)
                batch.append(item)

                if len(batch) == BATCH_SIZE:
                    status_queue.put((worker_id, "Preparing batch (Starting)"))
                    results = process_audio_batch(batch, model)
                    status_queue.put((worker_id, "Preparing batch (Finished)"))

                    status_queue.put((worker_id, "Saving results (Starting)"))
                    save_to_jsonl(results, output_file)
                    status_queue.put((worker_id, "Saving results (Finished)"))
                    total_processed += len(results)
                    progress_queue.put(len(results))
                    batch = []

                    if total_processed >= ROWS_PER_PUSH:
                        break  # Stop after reaching ROWS_PER_PUSH

                    if STOP_AFTER is not None and total_processed // BATCH_SIZE >= STOP_AFTER:
                        break
            except StopIteration:
                break

        # Process any remaining items
        if batch:
            results = process_audio_batch(batch, model)
            save_to_jsonl(results, output_file)
            total_processed += len(results)
            progress_queue.put(len(results))

        status_queue.put((worker_id, "Completed"))

    except Exception as e:
        logging.error(
            f"Worker {worker_id} encountered an error: {str(e)}\n{traceback.format_exc()}"
        )
        status_queue.put((worker_id, "Error"))


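# main() spawns the worker processes, then polls two queues: status_queue drives
# a per-stage tqdm dashboard (one bar per entry in STAGES) and progress_queue
# drives the overall row counter.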
def main():
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    ctx = mp.get_context('spawn')
    status_queue = ctx.Queue()
    progress_queue = ctx.Queue()

    print(f"Initializing {NUM_WORKERS} workers across {NUM_GPUS} GPUs...")

    # Create and start worker processes
    processes = [
        ctx.Process(target=process_shard, args=(i, status_queue, progress_queue))
        for i in range(NUM_WORKERS)
    ]
    for p in processes:
        p.start()

    stage_counts = {
        stage: tqdm(total=NUM_WORKERS, desc=f"{stage:<30}", position=i, leave=True)
        for i, stage in enumerate(STAGES)
    }

    total_rows = NUM_WORKERS * BATCH_SIZE * STOP_AFTER if STOP_AFTER else ROWS_PER_PUSH
    overall_progress = tqdm(
        total=total_rows, desc="Overall Progress", position=len(STAGES), leave=True
    )

    # No stage reported yet; using None (rather than the first stage label) avoids
    # decrementing a bar that was never incremented when the first update arrives.
    worker_stages = defaultdict(lambda: None)

    while any(p.is_alive() for p in processes):
        try:
            worker_id, status = status_queue.get(timeout=0.1)
            old_stage = worker_stages[worker_id]
            worker_stages[worker_id] = status

            if old_stage != status:
                if old_stage is not None and old_stage not in ("Completed", "Error"):
                    stage_counts[old_stage].update(-1)
                stage_counts[status].update(1)
        except queue.Empty:
            pass

        try:
            progress = progress_queue.get(timeout=0.1)
            overall_progress.update(progress)
        except queue.Empty:
            pass

    for p in processes:
        p.join()

    # Drain any status messages that arrived after the last poll so the final
    # statistics below reflect every worker's last reported stage.
    while True:
        try:
            worker_id, status = status_queue.get_nowait()
            worker_stages[worker_id] = status
        except queue.Empty:
            break

    for bar in stage_counts.values():
        bar.close()
    overall_progress.close()

    print("All workers finished processing.")

    # Print final statistics
    completed_workers = sum(1 for stage in worker_stages.values() if stage == "Completed")
    error_workers = sum(1 for stage in worker_stages.values() if stage == "Error")
    print(f"Completed workers: {completed_workers}")
    print(f"Workers with errors: {error_workers}")


if __name__ == "__main__":
    main()