applied-ai-018 committed
Commit 752ec27 · verified · 1 Parent(s): 3018c75

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. cc-multilingual-main/cc_net/build/lib/cc_net/__init__.py +5 -0
  2. cc-multilingual-main/cc_net/build/lib/cc_net/__main__.py +18 -0
  3. cc-multilingual-main/cc_net/build/lib/cc_net/dedup.py +478 -0
  4. cc-multilingual-main/cc_net/build/lib/cc_net/execution.py +248 -0
  5. cc-multilingual-main/cc_net/build/lib/cc_net/flat_hash_set.py +247 -0
  6. cc-multilingual-main/cc_net/build/lib/cc_net/get_wiki_cirrus.py +127 -0
  7. cc-multilingual-main/cc_net/build/lib/cc_net/jsonql.py +1340 -0
  8. cc-multilingual-main/cc_net/build/lib/cc_net/minify.py +304 -0
  9. cc-multilingual-main/cc_net/build/lib/cc_net/split_by_lang.py +151 -0
  10. cc-multilingual-main/cc_net/build/lib/cc_net/tokenizer.py +79 -0
  11. cc-multilingual-main/cc_net/cc_net/__init__.py +5 -0
  12. cc-multilingual-main/cc_net/cc_net/__init__.pyc +0 -0
  13. cc-multilingual-main/cc_net/cc_net/__main__.py +18 -0
  14. cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-310.pyc +0 -0
  15. cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-312.pyc +0 -0
  16. cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-38.pyc +0 -0
  17. cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-310.pyc +0 -0
  18. cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-312.pyc +0 -0
  19. cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-38.pyc +0 -0
  20. cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-310.pyc +0 -0
  21. cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-38.pyc +0 -0
  22. cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-310.pyc +0 -0
  23. cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-38.pyc +0 -0
  24. cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-310.pyc +0 -0
  25. cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-38.pyc +0 -0
  26. cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-310.pyc +0 -0
  27. cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-38.pyc +0 -0
  28. cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-310.pyc +0 -0
  29. cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-38.pyc +0 -0
  30. cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-310.pyc +0 -0
  31. cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-38.pyc +0 -0
  32. cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-310.pyc +0 -0
  33. cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-38.pyc +0 -0
  34. cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-310.pyc +0 -0
  35. cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-38.pyc +0 -0
  36. cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-310.pyc +0 -0
  37. cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-38.pyc +0 -0
  38. cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-310.pyc +0 -0
  39. cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-38.pyc +0 -0
  40. cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-310.pyc +0 -0
  41. cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-38.pyc +0 -0
  42. cc-multilingual-main/cc_net/cc_net/break.ipynb +0 -0
  43. cc-multilingual-main/cc_net/cc_net/data/cutoff.csv +101 -0
  44. cc-multilingual-main/cc_net/cc_net/data/test_stats.json +38 -0
  45. cc-multilingual-main/cc_net/cc_net/dedup.py +478 -0
  46. cc-multilingual-main/cc_net/cc_net/execution.py +248 -0
  47. cc-multilingual-main/cc_net/cc_net/flat_hash_set.py +247 -0
  48. cc-multilingual-main/cc_net/cc_net/get_wiki_cirrus.py +127 -0
  49. cc-multilingual-main/cc_net/cc_net/jsonql.py +1340 -0
  50. cc-multilingual-main/cc_net/cc_net/mine.py +648 -0
cc-multilingual-main/cc_net/build/lib/cc_net/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ #
cc-multilingual-main/cc_net/build/lib/cc_net/__main__.py ADDED
@@ -0,0 +1,18 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ #
+
+
+ import func_argparse
+
+ import cc_net.mine
+
+
+ def main():
+     func_argparse.parse_and_call(cc_net.mine.get_main_parser())
+
+
+ if __name__ == "__main__":
+     main()
cc-multilingual-main/cc_net/build/lib/cc_net/dedup.py ADDED
@@ -0,0 +1,478 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Tools to remove duplicate paragraphs across one or several shards.
9
+ """
10
+
11
+ import argparse
12
+ import gc
13
+ import hashlib
14
+ import logging
15
+ import multiprocessing
16
+ import os
17
+ import tempfile
18
+ import time
19
+ from pathlib import Path
20
+ from typing import Iterable, List, Optional, Set, Union
21
+
22
+ import numpy as np
23
+
24
+ from cc_net import jsonql
25
+ from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
26
+ from cc_net.jsonql import mem_footprint_gb
27
+ from cc_net.text_normalizer import normalize_for_dedup
28
+
29
+ BYTE_ORDER = "little"
30
+ HASH_SIZE = HASH_TYPE(0).nbytes
31
+ DISABLE_MULTI_PROCESSING = False
32
+
33
+ FilesOrDir = Union[List[Path], Path]
34
+
35
+
36
+ def get_args():
37
+ parser = argparse.ArgumentParser(
38
+ description="Read a set of json files and allow to query them",
39
+ parents=[jsonql.io_parser()],
40
+ )
41
+
42
+ parser.add_argument("--field", type=str, default="raw_content")
43
+ parser.add_argument("--output_hashes", type=str)
44
+ parser.add_argument("--no_finalize", action="store_false", dest="finalize")
45
+ # parser.add_argument("--mem_gb", type=int)
46
+ parser.add_argument("--hashes", type=str)
47
+
48
+ return vars(parser.parse_args())
49
+
50
+
51
+ def _b2i(b: bytes) -> int:
52
+ return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
53
+
54
+
55
+ def str_hash(s: str) -> int:
56
+ h = hashlib.sha1(bytes(s, encoding="utf-8"))
57
+ return _b2i(h.digest())
58
+
59
+
60
+ log = logging.getLogger(__name__).info
61
+
62
+
63
+ def run_par(processes):
64
+ # This is different from multiprocessing.map since it allows for kwargs.
65
+ processes = list(processes)
66
+ if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
67
+ for f, args, kwargs in processes:
68
+ f(*args, **kwargs)
69
+ return
70
+
71
+ log(f"Starting {len(processes)} subprocess")
72
+ processes = [
73
+ multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
74
+ ]
75
+ for p in processes:
76
+ p.start()
77
+ for p in processes:
78
+ p.join()
79
+ failed = 0
80
+ for p in processes:
81
+ if p.exitcode != 0:
82
+ log(f"Process failed with code {p.exitcode}: {p}")
83
+ failed += 1
84
+ assert failed == 0, f"{failed} processes failed..."
85
+
86
+
87
+ def split_file(file, n_splits):
88
+ for i in range(n_splits):
89
+ yield jsonql.SplitFile(file, i, n_splits)
90
+
91
+
92
+ def merge(hashes_1, hashes_2, output):
93
+ if isinstance(hashes_1, str):
94
+ h1 = FlatHashSet()
95
+ h1.load(hashes_1)
96
+ else:
97
+ h1 = hashes_1
98
+
99
+ if isinstance(hashes_2, str):
100
+ h2 = FlatHashSet()
101
+ h2.load(hashes_2)
102
+ else:
103
+ h2 = hashes_2
104
+
105
+ h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
106
+ dup = h1.__contains__(h2_np)
107
+
108
+ # Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
109
+ # h1 with their value.
110
+ h1[h2_np] = dup
111
+ if output:
112
+ h1.dump(output)
113
+ return h1
114
+
115
+
116
+ def merge_shard(hash_files, output):
117
+ h = FlatHashSet()
118
+ h.load(hash_files[0])
119
+ for hash_file in hash_files[1:]:
120
+ h = merge(h, hash_file, output=None)
121
+ print(f"Merged {hash_file}. We now have {len(h)} hashes.")
122
+
123
+ h.dump(output)
124
+ print(f"Saved {len(h)} hashes to {output}.")
125
+
126
+
127
+ def _dump_sentence_hashes(source: Path, output: Path, field: str):
128
+ treated = 0
129
+ started = time.time()
130
+ with open(output, "wb") as o:
131
+ for doc in jsonql.read_jsons(source):
132
+ content = doc.get(field)
133
+ if not content:
134
+ continue
135
+ h = compute_hashes(content)
136
+ if h is None:
137
+ continue
138
+ h.tofile(o)
139
+ treated += 1
140
+ if treated % 100_000 == 0:
141
+ delay = time.time() - started
142
+ log(
143
+ f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
144
+ )
145
+
146
+
147
+ def _remove_duplicate_hashes(duplicates, source, output):
148
+ batch_size = 100_000
149
+ n_lines, n_lines_kept = 0, 0
150
+ with open(source, "rb") as f, open(output, "wb") as o:
151
+ log(f"Opening {source} with mode rb")
152
+ log(f"Opening {output} with mode wb")
153
+ while True:
154
+ hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
155
+ if hashes.size == 0:
156
+ break
157
+
158
+ keep = duplicates[hashes] < 1
159
+ kept = keep.sum()
160
+ hashes *= keep
161
+ hashes.tofile(o)
162
+
163
+ n_lines += hashes.size
164
+ n_lines_kept += kept
165
+
166
+ removed = n_lines - n_lines_kept
167
+ selectivity = n_lines_kept / n_lines if n_lines else 0
168
+ log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
169
+
170
+
171
+ def remove_duplicates_sharded(
172
+ files: List[Path],
173
+ outputs: List[Path],
174
+ hashes_dir: FilesOrDir,
175
+ field: str,
176
+ group_hashes: int = 1,
177
+ tmp_dir: Path = None,
178
+ min_len: int = 0,
179
+ ):
180
+ """Remove duplicates in several passes, when all hashes don't fit in RAM.
181
+
182
+ Note: The current implementation is not doing a 'perfect' deduplication.
183
+ If a hash appear exactly once in each shard of hashes it won't be detected
184
+ as a duplicate. This can be fixed if hashes are fully dedup beforehand.
185
+ """
186
+ assert len(files) == len(outputs)
187
+
188
+ if isinstance(hashes_dir, list):
189
+ hashes_files = hashes_dir
190
+ else:
191
+ hashes_files = sorted(
192
+ h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
193
+ )
194
+
195
+ assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
196
+
197
+ if len(hashes_files) <= group_hashes:
198
+ log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
199
+ rm_dups = DuplicatesRemover(field, hashes_files)
200
+ rm_dups._prepare()
201
+ run_par(
202
+ (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
203
+ for f, o in zip(files, outputs)
204
+ )
205
+ return
206
+
207
+ log(f"Starting deduplicate_sharded on {files}.")
208
+ tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
209
+
210
+ def tmp_files(i):
211
+ return [
212
+ Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
213
+ for f in files
214
+ ]
215
+
216
+ last = tmp_files(0)
217
+ run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
218
+
219
+ if isinstance(hashes_dir, list):
220
+ hashes_files = hashes_dir
221
+ else:
222
+ hashes_files = sorted(
223
+ h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
224
+ )
225
+ for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
226
+ hashes = FlatHashSet()
227
+ for h in group:
228
+ hashes.load(h)
229
+ log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
230
+
231
+ intermediates = tmp_files(i + 1)
232
+ # Remove hashes in parallel. Since modern OS have "copy-on-write" and
233
+ # `hashes` is read-only, we will only have one version of it in RAM.
234
+ run_par(
235
+ (_remove_duplicate_hashes, (hashes, f, tmp), {})
236
+ for f, tmp in zip(last, intermediates)
237
+ )
238
+ # Force hashes to be freed, before we start allocating a new one.
239
+ del hashes
240
+ gc.collect()
241
+
242
+ for tmp in last:
243
+ os.remove(tmp)
244
+ last = intermediates
245
+
246
+ def finalize(source, dedup_hashes, min_len):
247
+ n_chars, n_chars_kept = 0, 0
248
+ with open(dedup_hashes, "rb") as hashes:
249
+ for doc in jsonql.read_jsons(source):
250
+ content = doc.get(field)
251
+ if not content or len(content) < min_len:
252
+ continue
253
+ sentences = content.split("\n")
254
+ doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
255
+ chars, kept_chars = finalize_doc(doc, field, doc_hashes)
256
+ n_chars += chars
257
+ n_chars_kept += kept_chars
258
+ yield doc
259
+ selectivity = n_chars_kept / n_chars if n_chars else 0
260
+ log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
261
+
262
+ dedup_hashes = last
263
+ run_par(
264
+ [
265
+ (
266
+ jsonql.run_pipe,
267
+ (finalize,),
268
+ dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
269
+ )
270
+ for h, f, o in zip(dedup_hashes, files, outputs)
271
+ ]
272
+ )
273
+
274
+ tmp_directory.cleanup()
275
+
276
+
277
+ def compute_hashes(content) -> Optional[np.ndarray]:
278
+ if not content:
279
+ return None
280
+ lines = content.split("\n")
281
+ # save hashes as bytes but reinterpret them as uint64.
282
+ hashes = np.fromiter(
283
+ (
284
+ hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
285
+ :HASH_SIZE
286
+ ]
287
+ for l in lines
288
+ ),
289
+ dtype=np.dtype((bytes, HASH_SIZE)),
290
+ count=len(lines),
291
+ )
292
+ return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
293
+
294
+
295
+ def finalize_doc(doc, field, hashes=None):
296
+ content = doc.get(field)
297
+ lines = content.split("\n")
298
+ n_chars = len(content)
299
+ if "original_nlines" not in doc:
300
+ doc["original_nlines"] = doc.get("nlines", len(lines))
301
+ if "original_length" not in doc:
302
+ doc["original_length"] = doc.get("length", n_chars)
303
+ if hashes is None:
304
+ hashes = doc.pop(field + "_hash")
305
+
306
+ # Remove duplicates inside doc
307
+ seen: Set[int] = set()
308
+ original_line_ids = doc.get("line_ids", range(len(hashes)))
309
+ line_ids = []
310
+ new_lines = []
311
+ for l, line, h in zip(original_line_ids, lines, hashes):
312
+ if h not in seen and h != 0:
313
+ line_ids.append(l)
314
+ new_lines.append(line)
315
+ seen.add(h)
316
+
317
+ doc[field] = "\n".join(new_lines)
318
+ doc["nlines"] = len(line_ids)
319
+ n_chars_kept = len(doc[field])
320
+ doc["length"] = n_chars_kept
321
+ doc["line_ids"] = line_ids
322
+ return n_chars, n_chars_kept
323
+
324
+
325
+ class HashesCollector(jsonql.Transformer):
326
+ """
327
+ Collect all hashes found of lines found in the `field` of the source documents.
328
+ """
329
+
330
+ parallelisable = False
331
+
332
+ def __init__(
333
+ self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
334
+ ):
335
+ super().__init__()
336
+ self.n_lines = 0
337
+ self.field = field
338
+ self.output = output
339
+ self.hashes = FlatHashSet() if hashes is None else hashes
340
+ self.num_hashes_end = 0
341
+ self.num_hashes_start = len(self.hashes)
342
+
343
+ def summary(self) -> List[str]:
344
+ summ = super().summary()
345
+ h = self.num_hashes_end if self.hashes is None else len(self.hashes)
346
+ h = (h - self.num_hashes_start) // 1000
347
+ max_mem = mem_footprint_gb()
348
+ n = self.n_lines // 1000
349
+ summ.append(
350
+ f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
351
+ )
352
+ return summ
353
+
354
+ def do(self, doc: dict) -> None:
355
+ doc_hashes = compute_hashes(doc.get(self.field))
356
+ if doc_hashes is None:
357
+ return
358
+ self.hashes.add(doc_hashes)
359
+ self.n_lines += doc_hashes.size
360
+
361
+ def close(self):
362
+ if self.output and self.hashes:
363
+ self.hashes.dump(self.output)
364
+ self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
365
+ # Save the number of hashes.
366
+ self.num_hashes_end = len(self.hashes)
367
+ # Free up mem even if the transformer is kept somewhere else.
368
+ self.hashes = None # type: ignore
369
+
370
+
371
+ class DuplicatesRemover(jsonql.Transformer):
372
+ """DuplicatesRemover"""
373
+
374
+ # The hashes can't be pickled so they will have to be read back from disk.
375
+ warn_when_pickling = True
376
+
377
+ def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
378
+ """
379
+ Remove duplicates
380
+ """
381
+ super().__init__()
382
+ self.field = field
383
+ self.collect = collect
384
+
385
+ self.hashes_files = hashes_files
386
+ self.duplicates: Optional[AbstractDedupHashSet] = None
387
+
388
+ self.n_lines, self.n_lines_kept = 0, 0
389
+ self.n_chars, self.n_chars_kept = 0, 0
390
+
391
+ def _prepare(self):
392
+ if self.duplicates is not None:
393
+ return
394
+ self.duplicates = FlatHashSet()
395
+
396
+ start = time.time()
397
+ for h in self.hashes_files:
398
+ shard_start = time.time()
399
+ self.duplicates.load(str(h))
400
+ delay = time.time() - shard_start
401
+ self.log(
402
+ f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
403
+ )
404
+
405
+ delay = time.time() - start
406
+ self.log(
407
+ f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
408
+ )
409
+
410
+ def do(self, doc: dict) -> Optional[dict]:
411
+ content = doc.get(self.field)
412
+ if not content:
413
+ return None
414
+ doc_hashes = compute_hashes(content)
415
+
416
+ assert self.duplicates is not None
417
+ seen = (
418
+ self.duplicates.add(doc_hashes)
419
+ if self.collect
420
+ else self.duplicates[doc_hashes]
421
+ )
422
+ keep = seen < True
423
+ kept = keep.sum()
424
+ if kept == 0:
425
+ return None
426
+ doc_hashes = doc_hashes * keep
427
+ self.n_lines += keep.size
428
+ self.n_lines_kept += kept
429
+ chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
430
+ self.n_chars += chars
431
+ self.n_chars_kept += kept_chars
432
+ return doc
433
+
434
+ def summary(self) -> List[str]:
435
+ summ = super().summary()
436
+ end_time = time.time()
437
+ n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
438
+ speed = n_docs / (end_time - self.start_time)
439
+ summ.append(
440
+ f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
441
+ )
442
+ selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
443
+ summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
444
+
445
+ n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
446
+ selectivity = n_chars_kept / n_chars if n_chars else 0
447
+ summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
448
+ return summ
449
+
450
+
451
+ def deduplicate(
452
+ file: jsonql.ReadableFileLike, field: str = "raw_content"
453
+ ) -> Iterable[dict]:
454
+ """Remove duplicates of the given file (but keep the first occurence)."""
455
+ dup_remover = DuplicatesRemover(field, [], collect=True)
456
+ return dup_remover.map(jsonql.read_jsons(file))
457
+
458
+
459
+ def deduplicate_two_pass(
460
+ file: jsonql.FileDescriptor, field: str = "raw_content"
461
+ ) -> Iterable[dict]:
462
+ """Remove duplicates of the given file (even removing the first occurence).
463
+
464
+ This is what is done in the paper, and in mine.py
465
+ """
466
+ try:
467
+ if isinstance(file, Path):
468
+ hash_file: Path = file.with_suffix(".bin")
469
+ else:
470
+ hash_file = jsonql._tmp(Path("hashes.bin"))
471
+ jsonql.run_pipes(
472
+ jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
473
+ )
474
+ dup_remover = DuplicatesRemover(field, [hash_file])
475
+ return dup_remover.map(jsonql.read_jsons(file))
476
+ finally:
477
+ if hash_file.exists():
478
+ hash_file.unlink()
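
For orientation, the two entry points above differ only in whether the first occurrence of a paragraph survives. Below is a minimal usage sketch with hypothetical paths, assuming cc_net and its dependencies are installed:

# Illustrative sketch of calling the deduplication helpers above.
# "data/shard.json" and "data/shard.dedup.json" are placeholder paths.
from pathlib import Path

from cc_net import dedup, jsonql


def dedup_shard(src: Path, dst: Path) -> None:
    # deduplicate() keeps the first occurrence of each paragraph;
    # deduplicate_two_pass() also drops the first occurrence, as mine.py does.
    docs = dedup.deduplicate(src, field="raw_content")
    jsonql.write_jsons(docs, dst)


if __name__ == "__main__":
    dedup_shard(Path("data/shard.json"), Path("data/shard.dedup.json"))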
cc-multilingual-main/cc_net/build/lib/cc_net/execution.py ADDED
@@ -0,0 +1,248 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ #
+
+ import functools
+ import itertools
+ import logging
+ import os
+ import sys
+ import time
+ import warnings
+ from pathlib import Path
+ from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
+
+ import submitit
+ from typing_extensions import Protocol
+ # import pdb
+ from concurrent.futures import ThreadPoolExecutor
+
+
+ class Executor(Protocol):
+     def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
+         ...
+
+
+ class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
+     def __init__(self, fn: Callable):
+         self.fn = fn
+         self.__name__ = fn.__name__
+
+     def __call__(self, *args, **kwargs):
+         return self.fn(*args, **kwargs)
+
+
+ def get_executor(
+     name: str,
+     log_dir: Path,
+     execution: str,
+     timeout_hour: float = 1.0,
+     mem_gb: int = 1,
+     cpus: int = 1,
+     task_parallelism: int = -1,
+     options: dict = {},
+ ) -> Executor:
+
+     execution_mode = execution.split(",")[0]
+     options.update(
+         {kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
+     )
+
+     if execution_mode == "mp":
+         warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
+         execution_mode = "local"
+
+     cluster = None if execution_mode == "auto" else execution_mode
+     # use submitit to detect which executor is available
+     ex = submitit.AutoExecutor(log_dir, cluster=cluster)
+     ex.parameters['timeout_min'] = int(timeout_hour * 60)
+
+     if ex.cluster == "local":
+         # LocalExecutor doesn't respect task_parallelism
+         return functools.partial(custom_map_array, ex, task_parallelism)
+     if ex.cluster == "debug":
+         return debug_executor
+     # pdb.set_trace()
+     # We are on slurm
+     if task_parallelism == -1:
+         task_parallelism = 500
+
+     ex.update_parameters(
+         name=name,
+         timeout_min=int(timeout_hour * 60),
+         mem_gb=mem_gb,
+         cpus_per_task=cpus,
+         slurm_array_parallelism=task_parallelism,
+         **options,
+     )
+     return functools.partial(map_array_and_wait, ex)
+
+
+ def map_array_and_wait(
+     ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
+ ):
+     f_name = function.__name__
+
+     assert len(args) > 0, f"No arguments passed to {f_name}"
+     approx_length = _approx_length(*args)
+
+     print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
+     jobs = ex.map_array(function, *args)
+     if not jobs:
+         return
+     failed_jobs = []
+     done = 0
+     total = len(jobs)
+     job_array_id = jobs[0].job_id.split("_")[0]
+     # pdb.set_trace()
+     print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
+     for job in submitit.helpers.as_completed(jobs):
+         done += 1
+         e = job.exception()
+         if not e:
+             print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
+             continue
+
+         print(f"Failed job {job.job_id} ({done} / {total}):", e)
+         failed_jobs.append(job)
+
+     if failed_jobs:
+         n_failures = 10
+         message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
+         print(message)
+         for job in failed_jobs[:n_failures]:
+             print(f"Failed {job.job_id} -> {job.paths.stderr}")
+         if len(failed_jobs) > n_failures:
+             print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
+         raise Exception(message)
+
+
+ def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
+     logging.getLogger().setLevel(logging.DEBUG)
+     approx_length = _approx_length(*args)
+     for i, x in enumerate(zip(*args)):
+         try:
+             message = function(*x)
+         except Exception:
+             exit(1)
+             try:
+                 import ipdb as pdb  # type: ignore
+             except ImportError:
+                 import pdb  # type: ignore
+             import traceback
+
+             traceback.print_exc()
+             print("")
+             pdb.post_mortem()
+             sys.exit(1)
+         if message is not None:
+             print(message, f"({i + 1} / {approx_length})")
+
+ # def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
+ #     logging.getLogger().setLevel(logging.DEBUG)
+ #     approx_length = _approx_length(*args)
+ #     with ThreadPoolExecutor(max_workers=4) as executor:
+ #         futures = []
+ #         for i, x in enumerate(zip(*args)):
+ #             future = executor.submit(_execute_function, function, x, i + 1, approx_length)
+ #             futures.append(future)
+ #         for future in futures:
+ #             future.result()
+
+ # def _execute_function(function: Callable[..., Optional[str]], args: tuple, index: int, total: int):
+ #     try:
+ #         message = function(*args)
+ #         if message is not None:
+ #             print(message, f"({index} / {total})")
+ #     except Exception:
+ #         # traceback.print_exc()
+ #         sys.exit(1)
+
+ def _approx_length(*args: Iterable):
+     for a in args:
+         if isinstance(a, Sized):
+             return len(a)
+     return -1
+
+
+ def custom_map_array(
+     ex: submitit.AutoExecutor,
+     parallelism: int,
+     function: Callable[..., Optional[str]],
+     *args: Iterable,
+ ) -> None:
+     f_name = function.__name__
+     assert len(args) > 0, f"No arguments passed to {f_name}"
+
+     jobs_args = list(zip(*args))
+     total = len(jobs_args)
+     if parallelism < 0:
+         parallelism = os.cpu_count() or 0
+     assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
+     print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
+     enqueued = 0
+     done = 0
+     running_jobs: List[submitit.Job] = []
+     failed_jobs: List[submitit.Job] = []
+
+     while done < len(jobs_args):
+         # Try to queue more job if we have some bandwidth.
+         if enqueued < total and len(running_jobs) < parallelism:
+             running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
+             enqueued += 1
+             continue
+
+         # Else wait for some job to finish
+         if not running_jobs:
+             warnings.warn(
+                 f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
+             )
+             break
+
+         job = get_next_job(running_jobs)
+         running_jobs.remove(job)
+         done += 1
+         e = job.exception()
+         if not e:
+             print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
+             continue
+
+         print(f"Failed job {job.job_id} ({done} / {total}):", e)
+         failed_jobs.append(job)
+
+     if failed_jobs:
+         n_failures = 10
+         message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
+         print(message)
+         for job in failed_jobs[:n_failures]:
+             print(f"Failed {job.job_id} -> {job.paths.stderr}")
+         if len(failed_jobs) > n_failures:
+             print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
+         raise Exception(message)
+
+
+ def get_next_job(
+     jobs: Sequence[submitit.Job], poll_frequency: float = 10
+ ) -> submitit.Job:
+     """
+     Waits for any of the job to finish and returns it.
+
+     jobs: list of jobs
+     poll_frequency: frequency in second at which we check job status
+     """
+     start = time.time()
+     waiting = False
+     while True:
+         for job in jobs:
+             if job.done():
+                 return job
+         if not waiting:
+             job_ids = [j.job_id for j in jobs[:4]]
+             suffix = "..." if len(jobs) > 4 else ""
+             print(
+                 f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
+             )
+             waiting = True
+         time.sleep(poll_frequency)
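
For orientation, `get_executor` returns a callable that behaves like a batched `map`: each positional iterable supplies one argument per job, and jobs run in-process ("debug"), locally, or on Slurm depending on `execution`. A minimal sketch with a made-up job name, log directory and worker function, assuming `submitit` is installed:

# Illustrative sketch of driving a function through get_executor().
from pathlib import Path

from cc_net import execution


def shout(text: str) -> str:
    # One call per element of the argument lists passed to the executor.
    return text.upper()


executor = execution.get_executor(
    name="demo",
    log_dir=Path("logs"),   # placeholder directory
    execution="debug",      # "debug" runs in-process; "local" and Slurm modes also exist
    timeout_hour=0.1,
    mem_gb=1,
    cpus=1,
)
executor(shout, ["hello", "world"])  # submits one job per element, like map()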
cc-multilingual-main/cc_net/build/lib/cc_net/flat_hash_set.py ADDED
@@ -0,0 +1,247 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ #
+
+ import sys
+ import time
+ import warnings
+ from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
+
+ import numpy as np
+
+ HASH_TYPE: Type[np.uint64] = np.uint64
+
+ GETPY_WARNING = False
+
+
+ class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
+     """A dict-like that returns `True` for keys that have been added more than once.
+
+     The API is batched and expect np.array as input. This batching grants better
+     perf when using the C++ implementation.
+     """
+
+     dtype: Type[np.uint64] = HASH_TYPE
+
+     def __repr__(self):
+         implementation = type(self).__name__
+         return f"[{implementation}, len: {len(self)}"
+
+     def __len__(self) -> int:
+         ...
+
+     def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
+         ...
+
+     def __getitem__(self, values) -> np.ndarray:
+         ...
+
+     def __setitem__(self, keys, values) -> None:
+         ...
+
+     def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
+         ...
+
+     def keys(self) -> Iterable[np.uint64]:
+         ...
+
+     def __iter__(self) -> Iterator[np.uint64]:
+         return iter(self.keys())
+
+     def add(self, h, contains=None):
+         """Add the given keys. First time a key is added the value is set to 0,
+         then it's set to one."""
+         if not isinstance(h, np.ndarray):
+             h = np.array(h, dtype=HASH_TYPE)
+         if contains is None:
+             contains = self.__contains__(h)
+
+         self.__setitem__(h, contains)
+         return contains
+
+     def merge(self, keys, values):
+         contains = self.__contains__(keys)
+         self.__setitem__(keys, contains | values)
+
+     def dump(self, filename):
+         return self.dump_np(filename)
+
+     def load(self, filename):
+         return self.load_np(filename)
+
+     def dump_np(self, filename):
+         kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
+         items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
+         with open(filename, "wb") as f:
+             np.save(f, items)
+
+     def load_np(self, filename):
+         items = np.load(str(filename))
+         keys = items["k"].copy()
+         values = items["v"].copy()
+         self.merge(keys, values)
+
+     def dump_np2(self, filename):
+         keys = np.fromiter(
+             (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
+         )
+         with open(filename, "wb") as f:
+             np.save(f, keys)
+
+         values = np.fromiter(
+             (v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
+         )
+         with open(str(filename) + ".val", "wb") as f:
+             np.save(f, values)
+
+     def load_np2(self, filename):
+         keys = np.load(filename)
+         values = np.load(str(filename) + ".val")
+         self.merge(keys, values)
+
+
+ class NaiveHashSet(dict, AbstractDedupHashSet):
+     """Pure python implementation of AbstractDedupHashSet.
+
+     This implementation is quite fast, since Python dict are heavily optimized.
+     """
+
+     def __init__(self, iterable=None):
+         super().__init__()
+         global GETPY_WARNING
+         if GETPY_WARNING:
+             warnings.warn(
+                 "Module 'getpy' not found. Deduplication will take more RAM."
+                 " Try `pip install cc_net[getpy]"
+             )
+         GETPY_WARNING = False
+
+     def __contains__(self, values):
+         """Returns `True` if the object has been added at list once."""
+         contains_point = super().__contains__
+         return np.fromiter(
+             map(contains_point, values), count=len(values), dtype=np.uint8
+         )
+
+     def __getitem__(self, values):
+         """Returns `True` if the object has been added at list twice."""
+         get_point = super().get
+         return np.fromiter(
+             map(lambda x: get_point(x, False), values),
+             count=len(values),
+             dtype=np.uint8,
+         )
+
+     def __setitem__(self, keys, values):
+         assert len(keys) == len(values)
+         for k, v in zip(keys, values):
+             dict.__setitem__(self, k, v)
+
+
+ try:
+     import getpy as gp  # type: ignore
+
+     class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
+         """C++ backed implementation of AbstractDedupHashSet.
+
+         This implementation is slightly slower than the Python one but uses
+         3x less RAM.
+         See https://github.com/atom-moyer/getpy.
+         """
+
+         def __init__(self):
+             super().__init__(HASH_TYPE, np.uint8, default_value=False)
+
+         def __contains__(self, h):
+             """Returns `True` if the object has been added at list once."""
+             if not isinstance(h, np.ndarray):
+                 h = np.array(h, dtype=HASH_TYPE)
+             c = gp.Dict.__contains__(self, h)
+             c.dtype = np.uint8
+             return c
+
+         def dump(self, filename):
+             return self.dump_gp(filename)
+
+         def load(self, filename):
+             return self.load_gp(filename)
+
+         def dump_gp(self, filename):
+             return gp.Dict.dump(self, str(filename))
+
+         def load_gp(self, filename):
+             """Override gp.Dict.load, to correctly merge values instead of overwriting."""
+             other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
+             other.load(str(filename))
+             n = len(other)
+             keys = np.fromiter(
+                 (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
+             )
+             values = np.fromiter(
+                 (v for (k, v) in other.items()), dtype=np.uint8, count=n
+             )
+             self.merge(keys, values)
+
+     FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
+ except ImportError:
+     GETPY_WARNING = True
+     FlatHashSet = NaiveHashSet
+
+
+ def timeit(message, function, *args):
+     start = time.time()
+     function(*args)
+     end = time.time()
+     print(message, f"took {end - start:.0f}s")
+
+
+ def compare_load(*filenames):
+     assert filenames, "No file given"
+
+     def load_list():
+         hashes = []
+         for f in filenames:
+             h = FlatHashSet()
+             h.load(f)
+             print(f"Loaded {h} from {f}.")
+             hashes.append(h)
+         return hashes
+
+     def load_all(load, ext):
+         hashes = FlatHashSet()
+         for f in filenames:
+             load(hashes, f + ext)
+
+     def dump_all(hashes, dump, ext):
+         for h, f in zip(hashes, filenames):
+             dump(h, f + ext)
+
+     hashes = load_list()
+     dump_gp = getattr(FlatHashSet, "dump_gp")
+     if dump_gp is not None:
+         timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
+     timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
+     timeit(
+         "Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
+     )
+
+     load_gp = getattr(FlatHashSet, "load_gp")
+     if load_gp is not None:
+         timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
+     timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
+     timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
+
+     # Loading 10 shards:
+     # [dedup] Dumping using gp.dump took 52s
+     # [dedup] Dumping using dump_np took 270s
+     # [dedup] Dumping using dump_np2 took 483s
+     #
+     # [dedup] Loading using gp.load took 654s
+     # [dedup] Loading using load_np took 82s
+     # [dedup] Loading using load_np2 took 76s
+
+
+ if __name__ == "__main__":
+     compare_load(*sys.argv[1:])
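
To make the `add` contract concrete: the first time a batch of keys is added the returned flags are 0, and adding the same keys again returns 1, which is how the dedup code flags repeated lines. A small sketch with illustrative keys; it works with either backend, falling back to `NaiveHashSet` when `getpy` is missing:

# Illustrative sketch of the add()/__getitem__ contract.
import numpy as np

from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet

hashes = FlatHashSet()
batch = np.array([1, 2, 3], dtype=HASH_TYPE)

first = hashes.add(batch)   # keys unseen so far -> array of 0s
second = hashes.add(batch)  # same keys again    -> array of 1s
print(first.tolist(), second.tolist())

# __getitem__ reports which keys were added more than once (unknown keys -> 0).
print(hashes[np.array([1, 4], dtype=HASH_TYPE)].tolist())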
cc-multilingual-main/cc_net/build/lib/cc_net/get_wiki_cirrus.py ADDED
@@ -0,0 +1,127 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ #
+
+ """
+ Creates mono-lingual corpus from Wikipedia.
+ """
+
+ import functools
+ import re
+ import subprocess
+ import urllib.request
+ from pathlib import Path
+ from typing import Dict
+
+ import func_argparse
+ from bs4 import BeautifulSoup  # type: ignore
+
+ from cc_net import jsonql, text_normalizer
+
+ CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
+ CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
+
+
+ def tmp(file: Path) -> Path:
+     return file.parent / ("tmp." + file.name)
+
+
+ def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
+     """Will dump the tokenized opening text of the given Wikipedia.
+
+     Args:
+         - file: File containing the Wikipedia dump.
+         - output: Output file.
+         - n_docs: How many docs to parse
+         - tokenize: whether to tokenize the text
+         - lang: Language code used to chose the tokenizer
+     """
+     assert file.exists()
+     return jsonql.run_pipes(
+         functools.partial(extract_opening_text, n_docs=n_docs),
+         file=file,
+         output=tmp(output) if output else None,
+     )
+     if output:
+         tmp(output).replace(output)
+
+
+ def extract_opening_text(source, n_docs: int = 10_000):
+     i = 0
+     for doc in jsonql.read_jsons(source):
+         if not doc:
+             continue
+
+         text = doc.get("opening_text")
+         if not text:
+             continue
+
+         yield text_normalizer.normalize(text)
+         i += 1
+         if i >= n_docs:
+             break
+
+
+ def dl(lang: str, output_dir: Path, date: str = None):
+     """Download the cirrus extract for the given lang.
+
+     See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
+
+     Args:
+         - lang: The Wikipedia code for the language.
+         - output_dir: Output directory. File will be `{lang}.json.gz`
+         - date: Date of a specific Cirrus dump.
+     """
+
+     urls = get_cirrus_urls(date)
+     assert (
+         lang in urls
+     ), f"--lang {lang} not found. Available languages are: {urls.keys()}"
+
+     assert output_dir, "--output_dir folder needed."
+     output_dir.mkdir(exist_ok=True)
+     output = output_dir / (lang + ".json.gz")
+     print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
+     wget(urls[lang], output)
+
+
+ def get_cirrus_urls(date: str = None) -> Dict[str, str]:
+     if date is None:
+         cirrus_page = BeautifulSoup(
+             urllib.request.urlopen(CIRRUS_URL), features="html.parser"
+         )
+         dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
+         dumps.remove("..")
+         dumps.remove("current")
+         # We take the oldest dump since the most recent might be incomplete.
+         # The page only link to the N latest dumps so the dump won't be too old.
+         date = min(dumps)
+
+     cirrus_url = "/".join((CIRRUS_URL, date))
+     print("Will use the Wikipedia dump from:", date, cirrus_url)
+     cirrus_page = BeautifulSoup(
+         urllib.request.urlopen(cirrus_url), features="html.parser"
+     )
+     urls = {}
+     for link in cirrus_page.findAll("a"):
+         match = CIRRUS_DUMP_RE.match(link.get("href"))
+         if not match:
+             continue
+
+         urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
+     assert urls, f"No valid download urls found at {cirrus_url}"
+     return urls
+
+
+ def wget(url: str, output: Path):
+     subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
+     tmp(output).replace(output)
+     assert (
+         output.stat().st_size > 10_000
+     ), f"File {output} downloaded from {url} looks too small"
+
+
+ if __name__ == "__main__":
+     func_argparse.main(dl, opening)
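
For orientation, `dl` and `opening` are exposed as func_argparse sub-commands but can also be called from Python. A hedged sketch with an example language code and directory, assuming network access and `wget` on the PATH:

# Illustrative sketch: fetch one Cirrus dump, then extract normalized openings.
from pathlib import Path

from cc_net import get_wiki_cirrus

wiki_dir = Path("wiki_data")  # example directory
get_wiki_cirrus.dl(lang="simple", output_dir=wiki_dir)  # writes wiki_data/simple.json.gz
get_wiki_cirrus.opening(
    file=wiki_dir / "simple.json.gz",
    output=wiki_dir / "simple.opening.txt",
    n_docs=10_000,
)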
cc-multilingual-main/cc_net/build/lib/cc_net/jsonql.py ADDED
@@ -0,0 +1,1340 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Manipulate files containing one json per line.
9
+ """
10
+ import argparse
11
+ import collections
12
+ import contextlib
13
+ import functools
14
+ import glob
15
+ import gzip
16
+ import importlib
17
+ import inspect
18
+ import io
19
+ import itertools
20
+ import json
21
+ import logging
22
+ import multiprocessing
23
+ import os
24
+ import re
25
+ import sys
26
+ import tempfile
27
+ import time
28
+ import typing as tp
29
+ import warnings
30
+ import zlib
31
+ from pathlib import Path
32
+ from typing import (
33
+ Callable,
34
+ Dict,
35
+ Iterable,
36
+ Iterator,
37
+ List,
38
+ Optional,
39
+ Sequence,
40
+ TextIO,
41
+ Tuple,
42
+ Union,
43
+ )
44
+
45
+ import numpy as np
46
+ import psutil # type: ignore
47
+ import requests
48
+ from typing_extensions import Protocol
49
+
50
+ logging.basicConfig(
51
+ level=logging.INFO,
52
+ format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
53
+ datefmt="%Y-%m-%d %H:%M",
54
+ )
55
+
56
+ NEWLINE = " N3WL1N3 "
57
+
58
+ FilterFn = Callable[[dict], bool]
59
+ FileDescriptor = Union[Path, List[Path], str]
60
+ WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
61
+ ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
62
+
63
+
64
+ def io_parser():
65
+ """Parser shared by all commands to get input/output files."""
66
+ parser = argparse.ArgumentParser(add_help=False)
67
+ file_help = """File to read from. Can be specified several times for several files.
68
+ Be careful that bash will expand glob patterns **before** sending the args
69
+ to python. To use globs put it inside single quotes:
70
+ jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
71
+ jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
72
+ [Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
73
+ [Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
74
+ """
75
+ parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
76
+ parser.add_argument("-o", "--output", type=Path, default="-")
77
+ parser.add_argument("--processes", type=int, default=1)
78
+ return parser
79
+
80
+
81
+ def get_parser():
82
+ parser = argparse.ArgumentParser(
83
+ description="Read a set of json files and allow to query them"
84
+ )
85
+ subparsers = parser.add_subparsers()
86
+
87
+ def add_subparser(function, arguments):
88
+ doc = function.__doc__.split("\n")[0]
89
+ p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
90
+ p.set_defaults(command=function)
91
+ for k, v in arguments.items():
92
+ p.add_argument(k, **v)
93
+
94
+ add_subparser(
95
+ select,
96
+ {
97
+ "columns": dict(nargs="+", help="Extract the value of the given fields"),
98
+ "--skip_empty": dict(
99
+ action="store_true", help="Skip lines without the requested fields"
100
+ ),
101
+ "--separator": dict(
102
+ default="\t", help="Separator to use between the different columns"
103
+ ),
104
+ "--newline": dict(
105
+ default=NEWLINE,
106
+ help="Replace newlines found in the text by the given string",
107
+ ),
108
+ },
109
+ )
110
+
111
+ add_subparser(
112
+ where,
113
+ {
114
+ "clauses": dict(nargs="+", help=""),
115
+ "--requires": dict(
116
+ action="append", help="Python module required by the clauses code."
117
+ ),
118
+ },
119
+ )
120
+
121
+ add_subparser(
122
+ merge,
123
+ {
124
+ "columns": dict(nargs="+", help=""),
125
+ "--separator": dict(
126
+ default="\t", help="Separator to use between the different columns"
127
+ ),
128
+ "--newline": dict(
129
+ default=NEWLINE, help="Replace the given string by actual newlines"
130
+ ),
131
+ },
132
+ )
133
+
134
+ add_subparser(
135
+ describe,
136
+ {
137
+ "columns": dict(nargs="*", help=""),
138
+ "--bins": dict(
139
+ default="auto", help="Number of bins for computing the histograms"
140
+ ),
141
+ "--cumulative": dict(
142
+ action="store_true", help="Compute cumulative histograms"
143
+ ),
144
+ "--weights": dict(type=str, help="Column used to weight histograms"),
145
+ },
146
+ )
147
+
148
+ add_subparser(split, {"--pattern": dict(type=str)})
149
+ add_subparser(shard, {})
150
+ return parser
151
+
152
+
153
+ def _split_array(array, sep):
154
+ last = 0
155
+ for i, x in enumerate(array):
156
+ if x != sep:
157
+ continue
158
+ yield array[last:i]
159
+ last = i + 1
160
+ if last != len(array):
161
+ yield array[last:]
162
+
163
+
164
+ def main(raw_args):
165
+ parser = get_parser()
166
+ pipeline = []
167
+ file = "-"
168
+ output = "-"
169
+ processes = 1
170
+
171
+ for args_group in _split_array(raw_args, "--"):
172
+ args = vars(parser.parse_args(args_group))
173
+ command = args.pop("command")
174
+ file = args.pop("file") or file
175
+ output = args.pop("output") or output
176
+ processes = args.pop("processes") or processes
177
+ pipeline.append(as_pipe(command, args))
178
+
179
+ if not pipeline:
180
+ parser.print_help()
181
+ return
182
+
183
+ run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
184
+
185
+
186
+ class Transformer:
187
+ """
188
+ Wrapper around functions transforming documents.
189
+
190
+ This allows `run_pipes` to automatically parallelize the pipeline.
191
+ Provides:
192
+ * Automatic logging. Logging can be changed with the `summary` method.
193
+ Loggin frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable.
194
+ * Automatic parallelization without pickling. The transformers are shared
195
+ across processes, and the object is usually not pickled.
196
+ * Basic pickling / unpickling in case it's still needed.
197
+ By default will only pickle the arguments passed to the constructor.
198
+ * Delayed initialization. Internal state which is not pickable should be set
199
+ inside the `_prepare` function.
200
+ """
201
+
202
+ parallelisable: bool = True
203
+ expect_json: bool = False
204
+ warn_when_pickling: bool = False
205
+ ready: bool = False
206
+
207
+ def __init_subclass__(cls, expect_json: bool = None):
208
+ """Detects if the subclass expects json as input."""
209
+ spec = inspect.getfullargspec(cls.do)
210
+ if expect_json is None:
211
+ expect_json = spec.annotations.get(spec.args[1], None) == dict
212
+
213
+ cls.expect_json = expect_json
214
+
215
+ def __new__(cls, *args, **kwargs):
216
+ """Creates the transformer and save the arguments passed to the constructor."""
217
+ t = super().__new__(cls)
218
+ Transformer.__init__(t, args, kwargs)
219
+ return t
220
+
221
+ def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
222
+ """
223
+ Init the transformer counters.
224
+
225
+ If state_args/state_kwargs are set they will override whatever was
226
+ originally passed to the subclass constructor.
227
+ """
228
+ if state_args is not None:
229
+ self.__args = state_args
230
+ if state_kwargs is not None:
231
+ self.__kwargs = state_kwargs
232
+
233
+ self.start_time = time.time()
234
+ self.__last_log = self.start_time
235
+ self.processed = 0
236
+ # Log every 5 min unless specified other wise.
237
+ self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
238
+ self.__cls = type(self)
239
+ self._logger = logging.getLogger(self.__cls.__name__)
240
+
241
+ def __call__(self, x):
242
+ assert self.ready, f"{self} is not ready."
243
+ if x is None:
244
+ return
245
+ y = self.do(x)
246
+ self.processed += 1
247
+ if time.time() - self.__last_log > self._log_freq:
248
+ self.log_summary()
249
+ return y
250
+
251
+ def do(self, x):
252
+ raise NotImplementedError(f"'do' not implemented in {type(self)}")
253
+
254
+ def summary(self) -> List[str]:
255
+ return [self.speed_summary()]
256
+
257
+ def speed_summary(self) -> str:
258
+ delay = time.time() - self.start_time
259
+ h = delay / 3600
260
+ s = self.processed / delay
261
+ return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
262
+
263
+ def log(self, message):
264
+ self._logger.info(message)
265
+
266
+ def log_summary(self) -> None:
267
+ if not self.ready:
268
+ self.log("Not ready.")
269
+ return
270
+ summ = self.summary() or []
271
+ for line in summ:
272
+ self.log(line)
273
+ self.__last_log = time.time()
274
+
275
+ def map(self, source: Iterable) -> Iterator:
276
+ if self.ready:
277
+ for x in source:
278
+ yield self(x)
279
+ # since we have been prepared by caller,
280
+ # caller is also responsible for calling `close`.
281
+ return
282
+ else:
283
+ with self:
284
+ for x in source:
285
+ yield self(x)
286
+
287
+ def __getstate__(self) -> Tuple[tuple, dict, bool]:
288
+ return (self.__args, self.__kwargs, self.expect_json)
289
+
290
+ def __setstate__(self, state: Tuple[tuple, dict, bool]):
291
+ if self.warn_when_pickling:
292
+ warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
293
+ (args, kwargs, expect_json) = state
294
+ # When unpickling `__new__` isn't called so we have to doit ourselves.
295
+ Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
296
+ type(self).__init__(self, *args, **kwargs)
297
+ assert self.expect_json == expect_json
298
+ # __setstate__ is called by multiprocessing right before calling
299
+ # the object so we need to initialize everything.
300
+ self.__enter__()
301
+
302
+ def _prepare(self) -> None:
303
+ pass
304
+
305
+ def __enter__(self) -> "Transformer":
306
+ # In multiprocessing __enter__ is always called twice, so we are idempotent.
307
+ # Because we call __enter__ when deserializing this transformer and
308
+ # also when the parent transformer is deserialized.
309
+ self.start_time = time.time()
310
+ if self.ready:
311
+ return self
312
+ self._prepare()
313
+ self.ready = True
314
+ return self
315
+
316
+ def __exit__(self, *args) -> None:
317
+ self.close()
318
+ self.log_summary()
319
+
320
+ def close(self) -> None:
321
+ pass
322
+
323
+
324
+ def as_pipe(transformer, kwargs):
325
+ if isinstance(transformer, type):
326
+ return transformer(**kwargs)
327
+ return lambda source: transformer(source, **kwargs)
328
+
329
+
330
+ def compose(fns: List[Transformer]) -> Transformer:
331
+ if len(fns) == 1:
332
+ return fns[0]
333
+ return MultiTransformer(fns)
334
+
335
+
336
+ class MultiTransformer(Transformer):
337
+ def __init__(self, transformers: List[Transformer]):
338
+ super().__init__()
339
+ self.transformers = transformers
340
+
341
+ def __repr__(self) -> str:
342
+ pipeline = " | ".join(type(t).__name__ for t in self.transformers)
343
+ return f"<{pipeline}>"
344
+
345
+ def do(self, x):
346
+ for t in self.transformers:
347
+ x = t(x)
348
+ return x
349
+
350
+ def _prepare(self):
351
+ for t in self.transformers:
352
+ t.__enter__()
353
+ return self
354
+
355
+ def __exit__(self, *args):
356
+ for t in self.transformers:
357
+ t.__exit__(*args)
358
+
359
+ def summary(self):
360
+ return itertools.chain(*(t.summary() for t in self.transformers))
361
+
362
+
363
+ class Mapper(Transformer):
364
+ def __init__(self, fn):
365
+ super().__init__()
366
+ self.fn = fn
367
+
368
+ def do(self, x):
369
+ return self.fn(x)
370
+
371
+
372
+ def run_pipe(
373
+ command,
374
+ kwargs: dict = None,
375
+ file: ReadableFileLike = None,
376
+ output: WritableFileLike = None,
377
+ ):
378
+ kwargs = kwargs or {}
379
+ if isinstance(kwargs, argparse.ArgumentParser):
380
+ kwargs = vars(kwargs.parse_args())
381
+ file = file or Path(kwargs.pop("file", "-"))
382
+ output = output or Path(kwargs.pop("output", "-"))
383
+
384
+ return run_pipes(as_pipe(command, kwargs), file=file, output=output)
385
+
386
+
387
+ def run_pipes(
388
+ *fns: Union[Transformer, Callable[[Iterable], Iterable]],
389
+ inputs: Iterable[dict] = None,
390
+ file: ReadableFileLike = None,
391
+ output: WritableFileLike = None,
392
+ processes: int = 1,
393
+ chunksize: int = 10_000,
394
+ ):
395
+ """
396
+ Run full document processing pipeline.
397
+
398
+ - fns: list of functions to run over the documents. Can be:
399
+ * `Iterable -> Iterable` function
400
+ * jsonql.Transformer instance
401
+ Using transformers allows the pipeline to process documents in parallel.
402
+ - inputs: iterable to read the documents from
403
+ - file: if inputs is not given, will read documents from this file.
404
+ - output: writable file like.
405
+ - processes: number of processes to use. -1 means all available CPUs.
406
+ - chunksize: chunksize for multiprocessing.Pool.imap_unordered
407
+ """
408
+ expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
409
+ if expect_json and inputs is None:
410
+ fns = (JsonReader(),) + fns
411
+ transformers = []
412
+ for t in fns:
413
+ if not isinstance(t, Transformer):
414
+ break
415
+ if not t.parallelisable:
416
+ break
417
+ transformers.append(t)
418
+ pipes = fns[len(transformers) :]
419
+
420
+ log = logging.getLogger(__name__).info
421
+ if inputs is None:
422
+ data: Iterable = open_read(file)
423
+ else:
424
+ data = inputs
425
+
426
+ if processes == -1:
427
+ processes = os.cpu_count() or 0
428
+
429
+ with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
430
+ if transformers:
431
+ log(f"preparing {transformers}")
432
+ transform = stack.enter_context(compose(transformers))
433
+ if processes <= 1:
434
+ data = transform.map(data)
435
+ else:
436
+ p = multiprocessing.current_process()
437
+ log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
438
+ pool = stack.enter_context(
439
+ multiprocessing.Pool(
440
+ processes=processes,
441
+ initializer=_set_global_transformer,
442
+ initargs=(transform,),
443
+ )
444
+ )
445
+ data = pool.imap_unordered(
446
+ _global_transformer, data, chunksize=chunksize
447
+ )
448
+
449
+ for fn in pipes:
450
+ if isinstance(fn, Transformer):
451
+ data = fn.map(data)
452
+ else:
453
+ data = fn(data)
454
+
455
+ write_jsons(data, output)
456
+
457
+
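A minimal usage sketch of `run_pipes` (not part of the original file; the paths, `add_length` and `n_chars` are illustrative). A `JsonReader` is placed first so that each input line is parsed into a dict before the other stages run.

from cc_net import jsonql

def add_length(doc: dict) -> dict:
    # Annotate each document with the length of its raw content (hypothetical field).
    doc["n_chars"] = len(doc.get("raw_content", ""))
    return doc

jsonql.run_pipes(
    jsonql.JsonReader(),                # parse each input line into a dict
    jsonql.Mapper(add_length),          # wrap a plain function as a Transformer
    jsonql.where(["{n_chars} > 100"]),  # keep only documents longer than 100 chars
    file="docs.json.gz",                # illustrative input, one JSON object per line
    output="long_docs.json.gz",         # illustrative output path
)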
458
+ # Allows sharing the transformer across subprocesses.
459
+ # Used by `run_pipes`
460
+ _GLOBAL_TRANSFORMER: Optional[Transformer] = None
461
+
462
+
463
+ def _set_global_transformer(transformer: Transformer):
464
+ global _GLOBAL_TRANSFORMER
465
+ p = multiprocessing.current_process()
466
+ logging.info(
467
+ f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
468
+ )
469
+ assert transformer.ready, f"{transformer} isn't ready"
470
+ _GLOBAL_TRANSFORMER = transformer
471
+
472
+
473
+ def _global_transformer(document: str) -> Optional[dict]:
474
+ assert _GLOBAL_TRANSFORMER is not None
475
+ return _GLOBAL_TRANSFORMER(document)
476
+
477
+
478
+ def lines(file: ReadableFileLike) -> Iterator[str]:
479
+ return (line.strip("\n") for line in open_read(file))
480
+
481
+
482
+ def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
483
+ reader = JsonReader(strict=strict)
484
+ lines = open_read(file)
485
+ for line in lines:
486
+ if line is None:
487
+ continue
488
+ yield reader(line)
489
+
490
+ reader.log_summary()
491
+
492
+
493
+ def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
494
+ eol = os.linesep
495
+ with open_write(file) as o:
496
+ for res in source:
497
+ if res is None:
498
+ continue
499
+ if isinstance(res, dict):
500
+ json.dump(res, o, ensure_ascii=False)
501
+ o.write(eol)
502
+ continue
503
+ if isinstance(res, str):
504
+ res = res.rstrip("\n")
505
+ print(res, file=o)
506
+
507
+
508
+ class JsonReader(Transformer):
509
+ def __init__(self, strict: bool = False):
510
+ super().__init__()
511
+ self.ready = True
512
+ self.strict = strict
513
+ self.num_errors = 0
514
+
515
+ def do(self, line: str) -> Optional[dict]:
516
+ if line is None:
517
+ return None
518
+ if isinstance(line, dict):
519
+ return line
520
+ line = line.rstrip("\n")
521
+ if not line:
522
+ return None
523
+ try:
524
+ return json.loads(line)
525
+ except json.decoder.JSONDecodeError as e:
526
+ self.log_error(e)
527
+ if self.strict:
528
+ raise
529
+ return None
530
+
531
+ def log_error(self, e: json.decoder.JSONDecodeError):
532
+ self.num_errors += 1
533
+ if self.num_errors > 10:
534
+ return
535
+
536
+ MAX_LEN = 80
537
+ snippet, snippet_len = e.doc, len(e.doc)
538
+ col = e.pos
539
+ if snippet_len > MAX_LEN:
540
+ if col < MAX_LEN:
541
+ start = 0
542
+ elif snippet_len - col < MAX_LEN:
543
+ start = snippet_len - MAX_LEN
544
+ else:
545
+ start = col - MAX_LEN // 2
546
+ snippet = e.doc[start : start + MAX_LEN]
547
+ col = col - start
548
+ logging.warning(
549
+ "\n".join(
550
+ [
551
+ f"Invalid json (length={len(e.doc)}) {e}",
552
+ snippet,
553
+ " " * (col - 1) + "^",
554
+ ]
555
+ )
556
+ )
557
+
558
+ def summary(self):
559
+ summ = super().summary()
560
+ if self.num_errors > 0:
561
+ summ.append(f"Skipped {self.num_errors} invalid json.")
562
+ return summ
563
+
564
+
565
+ def compile_column(column, newline):
566
+ if callable(column):
567
+ return column
568
+
569
+ if column == "*":
570
+ return json.dumps
571
+
572
+ if re.match(r"[_a-z][_a-z0-9]*", column):
573
+
574
+ def extract_col(doc):
575
+ v = doc.get(column, "")
576
+ if isinstance(v, str) and newline != "\n":
577
+ v = v.rstrip("\n").replace("\n", newline)
578
+ return v
579
+
580
+ return extract_col
581
+
582
+ return compile_expr(column)
583
+
584
+
585
+ def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
586
+ """Yields the content of the requested columns."""
587
+ column_parsers = [compile_column(c, newline) for c in columns]
588
+ for doc in read_jsons(lines):
589
+ values = []
590
+ empty = True
591
+ for parse_col in column_parsers:
592
+ v = parse_col(doc)
593
+ values.append(str(v) or "")
594
+ empty = empty and v is None
595
+
596
+ if skip_empty and empty:
597
+ continue
598
+
599
+ yield separator.join(values)
600
+
601
+
602
+ def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
603
+ if not isinstance(clause, str):
604
+ return clause
605
+
606
+ args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
607
+ args_list = list(re.findall(args_re, clause))
608
+ if not args_list:
609
+ # This is only a warning because you may want to have eg random sampling
610
+ # that doesn't depend on the document.
611
+ logging.warn(
612
+ f"Warning: No variable found in expression: <{clause}>\n"
613
+ "Variables should be written inside braces, eg: {language}=='en'"
614
+ )
615
+ python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
616
+ requires = requires or []
617
+ modules = {r: importlib.import_module(r) for r in requires}
618
+ return eval(f"lambda doc: {python_like}", modules)
619
+
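For illustration only (the field names below are made up): `compile_expr` turns a brace expression into a plain `doc -> value` function, which is how the `where` transformer below evaluates its clauses.

keep_long_english = compile_expr("{language} == 'en' and {length} > 200")
keep_long_english({"language": "en", "length": 350})  # -> True
keep_long_english({"language": "fr", "length": 350})  # -> False

# Extra modules can be made available to the expression through `requires`:
is_https = compile_expr("re.match('^https:', {url})", requires=["re"])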
620
+
621
+ class where(Transformer):
622
+ """Filters the data using python code.
623
+
624
+ Ex: `jsonql where 'len({text}) > 100'`
625
+ """
626
+
627
+ def __init__(
628
+ self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
629
+ ):
630
+ super().__init__()
631
+ self.raw_clauses = clauses
632
+ self.requires = requires
633
+ self.n_selected = 0
634
+ self.clauses: List[FilterFn] = []
635
+
636
+ def _prepare(self):
637
+ self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
638
+
639
+ def do(self, doc: dict) -> Optional[dict]:
640
+ assert self.clauses
641
+ if not doc or not all((c(doc) for c in self.clauses)):
642
+ return None
643
+ self.n_selected += 1
644
+ return doc
645
+
646
+ def summary(self):
647
+ n_selected, n_docs = self.n_selected, self.processed
648
+ selectivity = n_selected / n_docs if n_docs else 0
649
+ return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
650
+
651
+
652
+ def merge(lines, columns, separator="\t", newline=NEWLINE):
653
+ """Reads tab separated columns and output a json using the given headers.
654
+
655
+ Headers are of form {key}[%{type}]
656
+ {type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
657
+ Default type is string.
658
+ A special header "_" means interpret this column as json, and append all other
659
+ columns to it. Must appear only once and in the last position.
660
+
661
+ Ex:
662
+ `echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
663
+ `echo '1\thello' | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
664
+ `echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
665
+ """
666
+ handle_newlines = lambda s: s.replace(newline, "\n")
667
+ type_mapping: Dict[str, Callable] = {
668
+ "f": float,
669
+ "i": int,
670
+ "b": bool,
671
+ "s": handle_newlines,
672
+ }
673
+ type_parsing = [
674
+ type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
675
+ ]
676
+ columns = [f.split("%")[0] for f in columns]
677
+ doc_index = columns.index("_") if "_" in columns else -1
678
+ read_json = JsonReader()
679
+
680
+ def parse(line):
681
+ parts = line.split(separator, len(columns) - 1)
682
+ doc: Dict[str, tp.Any] = {}
683
+ for i, value in enumerate(parts):
684
+ if columns[i] == "_":
685
+ doc.update(read_json(parts[doc_index]))
686
+ else:
687
+ try:
688
+ doc[columns[i]] = type_parsing[i](value)
689
+ except ValueError:
690
+ logging.error(
691
+ f"Error when parsing column {i} of line: {line[:100]}..."
692
+ )
693
+ return doc
694
+
695
+ for line in lines:
696
+ yield json.dumps(parse(line))
697
+
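A small in-memory sketch of `merge` (the rows are illustrative); it mirrors the shell examples in the docstring above.

rows = ["1\thello", "2\tworld"]
print(list(merge(rows, ["n%i", "t"])))
# -> ['{"n": 1, "t": "hello"}', '{"n": 2, "t": "world"}']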
698
+
699
+ class split(Transformer):
700
+ """Split a file into several smaller files based on the value of a field."""
701
+
702
+ # Not parallelisable since we are writing to files.
703
+ parallelisable = False
704
+
705
+ def __init__(
706
+ self,
707
+ pattern: Union[Path, str] = None,
708
+ split_fn: Callable[[dict], str] = None,
709
+ mkdir: bool = False,
710
+ ):
711
+ super().__init__()
712
+ assert not (
713
+ pattern and split_fn
714
+ ), "split can't have both a pattern and a split_fn"
715
+ if split_fn is not None:
716
+ self.split_fn = split_fn
717
+ else:
718
+ assert pattern, "split needs either a pattern or a split_fn"
719
+ self.split_fn = self.make_split_fn(str(pattern))
720
+ self.mkdir = mkdir
721
+ self.o: dict = {}
722
+
723
+ def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
724
+ candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
725
+ return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
726
+
727
+ def do(self, doc):
728
+ filename = self.split_fn(doc)
729
+ if not filename:
730
+ return
731
+ o = self.o.get(filename, None)
732
+ if o is None:
733
+ if self.mkdir:
734
+ Path(filename).parent.mkdir(parents=True, exist_ok=True)
735
+ self.o[filename] = open_write(filename)
736
+ print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
737
+
738
+ def summary(self):
739
+ summ = super().summary()
740
+ summ.append(f"Found {len(self.o)} splits.")
741
+ return summ
742
+
743
+ def close(self):
744
+ for file in self.o.values():
745
+ file.close()
746
+
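A usage sketch for `split` (the documents and the output pattern are illustrative): one file is created per distinct value of the `language` field.

docs = [
    {"language": "en", "raw_content": "hello"},
    {"language": "fr", "raw_content": "salut"},
]
run_pipes(
    split(pattern="out/{language}.json", mkdir=True),
    inputs=docs,
)
# -> writes out/en.json and out/fr.json, one JSON document per line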
747
+
748
+ def histogram(values, bins, weights):
749
+ hist, bins = np.histogram(values, bins=bins)
750
+ # n_bins = len(hist)
751
+
752
+ if weights is not None:
753
+ # Bins can't be auto-determined if weights is supplied.
754
+ # So we first compute the bins without the weights then recompute
755
+ # the histogram with the weights.
756
+ hist, bins = np.histogram(values, bins=bins, weights=weights)
757
+ # cumsum = np.cumsum(hist)
758
+ # total = cumsum[-1]
759
+
760
+ # for i in range(n_bins - 1):
761
+ # if cumsum[i] / total > 0.9:
762
+ # useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
763
+ # new_bins = np.append(useful_range, [bins[-1]])
764
+ # return np.histogram(values, bins=new_bins, weights=weights)
765
+
766
+ return hist, bins
767
+
768
+
769
+ def _parse_bins(bins):
770
+ try:
771
+ if isinstance(bins, str):
772
+ if "," in bins:
773
+ bins = [int(b) for b in bins.split(",")]
774
+ else:
775
+ bins = int(bins)
776
+ except ValueError:
777
+ pass
778
+ return bins
779
+
780
+
781
+ ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
782
+ MAX_LABEL_LEN = 100
783
+
784
+
785
+ def bar_chart(hist, bins):
786
+ n = sum(hist)
787
+ max_h = max(hist)
788
+ out = []
789
+ for i, h in enumerate(hist):
790
+ h_size = 80 * h // max_h
791
+ dh_size = 80 * (h - hist[i - 1]) // max_h
792
+ if h_size == 0 or dh_size == 0:
793
+ continue
794
+ bar = "█" * h_size
795
+ out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
796
+ out.append(f"{bins[-1]:8.3f}")
797
+ return out
798
+
799
+
800
+ def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
801
+ out = []
802
+ documents = stats[ALL_DOCUMENTS]
803
+ count = stats.get(key, 0)
804
+ r = count / documents if documents else 0
805
+ out.append(f"Field {key} was seen {count} times ({r:5.1%})")
806
+
807
+ length = stats.get(key + ".length", None)
808
+ avg_length = length // count if length else 0
809
+ if length is not None:
810
+ out[-1] += f", average length is {length // count}"
811
+
812
+ values = stats.get(key + ".val", None)
813
+ if values:
814
+ out[-1] += f", histogram is: (bins={bins})"
815
+ if weights:
816
+ if weights not in stats:
817
+ logging.warn(f"Warning: weights column {weights} not found.")
818
+ if weights + ".val" not in stats:
819
+ logging.warn(
820
+ f"Warning: weights column {weights} is not a numeric column."
821
+ )
822
+ weights = stats.get(weights + ".val")
823
+ hist, bins = histogram(values, _parse_bins(bins), weights)
824
+ if cumulative:
825
+ hist = np.cumsum(hist)
826
+ out += bar_chart(hist, bins)
827
+
828
+ cnt = stats.get(key + ".cnt", None)
829
+ if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
830
+ cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
831
+ out[-1] += ", top 100 labels:"
832
+ for label, n in cnt[:100]:
833
+ if n < 5:
834
+ continue
835
+ out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
836
+
837
+ return out
838
+
839
+
840
+ def describe(source, columns=None, weights=None, **kwargs):
841
+ """Compute some statistics about a dataset.
842
+
843
+ Stats can be restricted to a subset of columns."""
844
+ MAX_HIST_SIZE = 100_000_000
845
+ MAX_CNT_SIZE = 1000
846
+ stats = {ALL_DOCUMENTS: 0}
847
+ needed = columns + [weights] if columns else None
848
+
849
+ for doc in read_jsons(source):
850
+ stats[ALL_DOCUMENTS] += 1
851
+ for k, v in doc.items():
852
+ if needed and k not in needed:
853
+ continue
854
+ stats[k] = get_or_set(stats, k, 0) + 1
855
+ if isinstance(v, str):
856
+ stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
857
+ if len(v) > MAX_LABEL_LEN: # Don't treat too-long strings as labels
858
+ continue
859
+ cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
860
+ if v in cnt or len(cnt) < MAX_CNT_SIZE:
861
+ cnt[v] += 1
862
+ elif type(v) in (int, float):
863
+ values = get_or_set(stats, k + ".val", [])
864
+ if len(values) < MAX_HIST_SIZE:
865
+ values.append(v)
866
+ elif type(v) is list and len(v) and type(v[0]) in (int, float):
867
+ values = get_or_set(stats, k + ".val", [])
868
+ if len(values) < MAX_HIST_SIZE:
869
+ values += v
870
+ elif type(v) is dict:
871
+ cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
872
+ for label in v:
873
+ if label in cnt or len(cnt) < MAX_CNT_SIZE:
874
+ cnt[label] += 1
875
+
876
+ documents = stats[ALL_DOCUMENTS]
877
+ yield f"Stats computed on {documents} documents:"
878
+ for k in stats:
879
+ if columns and k not in columns:
880
+ continue
881
+ if "." in k or k == ALL_DOCUMENTS:
882
+ continue
883
+ for line in display_stats(stats, k, weights=weights, **kwargs):
884
+ yield line
885
+
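A usage sketch for `describe` (the input path and column names are illustrative, and the file is assumed to exist):

for line in describe("docs.json.gz", columns=["language", "length"]):
    print(line)
# prints how often each field occurs, a histogram for numeric fields
# and the most frequent labels for string fields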
886
+
887
+ def shard(lines):
888
+ """Shard a file in several smaller ones."""
889
+ # The creation of the shards is handled in a generic way. Do we need this?
890
+ return lines
891
+
892
+
893
+ # *** Utils ***
894
+
895
+
896
+ def get_or_set(dictionary, key, default):
897
+ if key not in dictionary:
898
+ dictionary[key] = default
899
+ return dictionary[key]
900
+
901
+
902
+ class SimpleIO(Protocol):
903
+ """A subset of methods from TextIO."""
904
+
905
+ def close(self) -> None:
906
+ ...
907
+
908
+ def write(self, line: str) -> int:
909
+ ...
910
+
911
+ def __enter__(self) -> "SimpleIO":
912
+ ...
913
+
914
+ def __exit__(self, exc_type, exc_value, traceback):
915
+ ...
916
+
917
+
918
+ def open_read(filename: ReadableFileLike) -> Iterable[str]:
919
+ """Open the given file, list of files or files matching the given glob and read lines.
920
+
921
+ `filename` is None or "-" -> reads from stdin
922
+ `filename` is a Path / str -> interprets filename as a glob and opens the files matching it
923
+ `filename` is a list -> opens sequentially all files from the list using `open_read`
924
+ `filename` is something else -> returns the object wrapped in a `nullcontext`
925
+ This allows passing already opened files or iterables.
926
+
927
+ `open_read` will decompress gzip files, provided they have a ".gz" suffix.
928
+ """
929
+ if filename is None:
930
+ return sys.stdin
931
+
932
+ if isinstance(filename, list):
933
+ assert isinstance(filename[0], Path)
934
+ if len(filename) == 0:
935
+ return []
936
+ if len(filename) > 1:
937
+ return _yield_from(filename)
938
+ filename = tp.cast(Path, filename[0])
939
+ if isinstance(filename, str):
940
+ if filename.startswith("http://") or filename.startswith("https://"):
941
+ return open_remote_file(filename)
942
+
943
+ filename = Path(filename)
944
+ if not isinstance(filename, Path):
945
+ # we might have received an iterable, return it unmodified.
946
+ return filename # type: ignore
947
+
948
+ # Expand glob patterns only when reading
949
+ files = [Path(f) for f in sorted(glob.glob(str(filename)))]
950
+ if len(files) > 1:
951
+ return _yield_from(files)
952
+ if len(files) == 1:
953
+ filename = files[0]
954
+
955
+ assert isinstance(filename, Path)
956
+
957
+ if filename.name.endswith("]"):
958
+ return block_reader(filename)
959
+
960
+ logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
961
+ if filename.suffix == ".gz":
962
+ file: TextIO = gzip.open(filename, "rt") # type: ignore
963
+ else:
964
+ file = open(filename, "rt")
965
+
966
+ return _close_when_exhausted(file)
967
+
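The accepted inputs, summarized with illustrative paths (assuming the referenced files exist); this only restates what the docstring and code above already handle:

lines_from_glob  = open_read("dump/*.json.gz")       # chains every file matching the glob
lines_from_stdin = open_read(None)                   # None or "-" reads from stdin
one_block        = open_read("shard.json.gz[2/10]")  # block 2 of 10 of a blocked gzip file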
968
+
969
+ def _close_when_exhausted(file: TextIO) -> Iterable[str]:
970
+ with file:
971
+ yield from file
972
+
973
+
974
+ def _yield_from(files: list) -> Iterable[str]:
975
+ for file in files:
976
+ yield from open_read(file)
977
+
978
+
979
+ def open_write(
980
+ filename: WritableFileLike, max_size: str = "4G"
981
+ ) -> tp.ContextManager[TextIO]:
982
+ """Open the given file, list of files or files matching the given glob.
983
+
984
+ The return value is a ContextManager meant to be used inside a `with` block:
985
+ ```
986
+ with open_write("foo.txt") as o:
987
+ ...
988
+ ```
+
989
+ Write mode:
990
+ replaces "?" in the filename by numbers ranging from 0 to 9, generating files of size `max_size`.
991
+ If filename ends with ".gz", creates a blocked gzip file with random access.
992
+ """
993
+ if filename is None:
994
+ return contextlib.nullcontext(sys.stdout)
995
+
996
+ if isinstance(filename, list):
997
+ if len(filename) > 1:
998
+ return MultiFile(filename, "w", max_size)
999
+ else:
1000
+ filename = tp.cast(Path, filename[0])
1001
+ if isinstance(filename, str):
1002
+ filename = Path(filename)
1003
+ if not isinstance(filename, Path):
1004
+ assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
1005
+ # We return a 'TextIO' even though we only check for `.write` method,
1006
+ # this works better with eg `print`.
1007
+ return contextlib.nullcontext(tp.cast(TextIO, filename))
1008
+
1009
+ mode = "wt"
1010
+ if "?" in filename.name:
1011
+ return sharded_file(filename, mode, max_size)
1012
+
1013
+ logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
1014
+ # TODO: should we use another format ?
1015
+ if filename.suffix == ".gz":
1016
+ return BlockedGzipWriter(Path(filename), mode, block_size="64M")
1017
+
1018
+ return open(filename, "wt")
1019
+
1020
+
1021
+ def parse_size(size):
1022
+ unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
1023
+ unit = size[-1].upper()
1024
+ assert (
1025
+ unit in unit_map
1026
+ ), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
1027
+ return int(size[:-1]) * unit_map[unit]
1028
+
1029
+
1030
+ class MultiFile(SimpleIO):
1031
+ def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
1032
+ self.name = str(files)
1033
+ self.mode = mode
1034
+ self.files = iter(files)
1035
+ self.max_size = parse_size(max_size)
1036
+ self.current_handle: Optional[TextIO] = None
1037
+ self.current_block_size = 0
1038
+ self._open_next_handle() # Opening 1st handle allows to write directly.
1039
+
1040
+ def write(self, content) -> int:
1041
+ # Avoid splitting newlines to a new file.
1042
+ # use current_block_size since it's faster than `tell()`
1043
+ if content != "\n" and self.current_block_size >= self.max_size:
1044
+ self._open_next_handle()
1045
+ if self.current_handle is None:
1046
+ raise Exception("No more files to write to...")
1047
+
1048
+ written = self.current_handle.write(content)
1049
+ self.current_block_size += written
1050
+ return written
1051
+
1052
+ def _open_next_handle(self) -> bool:
1053
+ self.close()
1054
+ file = next(self.files, None)
1055
+ if file is None:
1056
+ return False
1057
+
1058
+ self.current_handle = open_write(file).__enter__()
1059
+ self.current_block_size = 0
1060
+ return True
1061
+
1062
+ def __enter__(self):
1063
+ return self
1064
+
1065
+ def __exit__(self, *exc_info):
1066
+ self.close()
1067
+
1068
+ @property
1069
+ def closed(self):
1070
+ return self.current_handle is None
1071
+
1072
+ def close(self):
1073
+ if self.current_handle is None:
1074
+ return
1075
+
1076
+ # log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
1077
+ self.current_handle.__exit__(None, None, None)
1078
+ self.current_handle = None
1079
+
1080
+
1081
+ # Not sure it helps since connections are reset anyway.
1082
+ _session = functools.lru_cache()(requests.Session)
1083
+
1084
+
1085
+ def request_get_content(url: str, n_retry: int = 3) -> bytes:
1086
+ """Retrieve the binary content at url.
1087
+
1088
+ Retry on connection errors.
1089
+ """
1090
+ t0 = time.time()
1091
+ logging.info(f"Starting download of {url}")
1092
+ for i in range(1, n_retry + 1):
1093
+ try:
1094
+ r = _session().get(url)
1095
+ r.raise_for_status()
1096
+ break
1097
+ except requests.exceptions.RequestException as e:
1098
+ # Sleep and try again on error, unless it's a 404.
1099
+ message = e.args[0] if isinstance(e.args[0], str) else ""
1100
+ if i == n_retry or "Client Error" in message:
1101
+ raise e
1102
+ warnings.warn(
1103
+ f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
1104
+ )
1105
+ time.sleep(10 * 2 ** i)
1106
+ dl_time = time.time() - t0
1107
+ dl_speed = len(r.content) / dl_time / 1024
1108
+ logging.info(
1109
+ f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
1110
+ )
1111
+ return r.content
1112
+
1113
+
1114
+ def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
1115
+ """Downloads the file at the given url to memory and opens it as a file.
1116
+ Assumes that the file is small, and fetches it when this function is called.
1117
+ """
1118
+ if cache and cache.exists():
1119
+ return open_read(cache)
1120
+
1121
+ # TODO: open the remote file in streaming mode.
1122
+ # The hard part is that we need to write the content on disk at the same time,
1123
+ # to implement disk caching.
1124
+ raw_bytes = request_get_content(url)
1125
+ content = io.BytesIO(raw_bytes)
1126
+ if url.endswith(".gz"):
1127
+ f: TextIO = gzip.open(content, mode="rt") # type: ignore
1128
+ else:
1129
+ f = io.TextIOWrapper(content)
1130
+
1131
+ if cache and not cache.exists():
1132
+ # The file might have been created while downloading/writing.
1133
+ tmp_cache = _tmp(cache)
1134
+ tmp_cache.write_bytes(raw_bytes)
1135
+ if not cache.exists():
1136
+ tmp_cache.replace(cache)
1137
+ else:
1138
+ tmp_cache.unlink()
1139
+
1140
+ return _close_when_exhausted(f)
1141
+
1142
+
1143
+ def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
1144
+ folder, name = file_pattern.parent, file_pattern.name
1145
+ assert "?" in name, f"Can't expand the given file_pattern: {file_pattern}"
1146
+
1147
+ n = name.count("?")
1148
+ assert 0 < n < 8
1149
+ assert "?" * n in name, f"The '?' need to be adjacent in {file_pattern}"
1150
+ assert "r" not in mode
1151
+ files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
1152
+
1153
+ return MultiFile(files, mode, max_size)
1154
+
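A usage sketch of the "?" expansion (paths, sizes and the documents are illustrative); `open_write` dispatches here automatically whenever the target name contains "?":

docs = ({"id": i, "text": "x" * 1000} for i in range(200_000))
with sharded_file(Path("part.???.json"), mode="w", max_size="100M") as o:
    for doc in docs:
        print(json.dumps(doc), file=o)
# -> writes part.000.json, then rolls over to part.001.json once 100M is reached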
1155
+
1156
+ class SplitFile:
1157
+ def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
1158
+ assert mode == "r"
1159
+ size = os.path.getsize(filename)
1160
+ self.handle = open(filename, mode)
1161
+ start = chunk * size // n_chunks
1162
+ self.end: int = (chunk + 1) * size // n_chunks
1163
+
1164
+ if start > 0:
1165
+ self.handle.seek(start - 1)
1166
+ # Skip incomplete line. This avoids crashing when reading e.g. the middle
1167
+ # of a unicode char. `self.handle.buffer` is a binary file reader.
1168
+ self.handle.buffer.readline() # type: ignore
1169
+
1170
+ def __enter__(self):
1171
+ return self
1172
+
1173
+ def __iter__(self):
1174
+ while True:
1175
+ line = self.handle.readline()
1176
+ if not line:
1177
+ return
1178
+
1179
+ yield line
1180
+ if self.handle.tell() >= self.end:
1181
+ return
1182
+
1183
+ def readlines(self):
1184
+ return list(self.__iter__())
1185
+
1186
+ def close(self):
1187
+ self.handle.close()
1188
+
1189
+ def __exit__(self, *args):
1190
+ self.close()
1191
+
1192
+
1193
+ def get_block_readers(filename: Path, n_readers, mode="t"):
1194
+ index_filename = filename.parent / (filename.name + ".index")
1195
+ if not index_filename.exists():
1196
+ return [gzip.open(filename, "r" + mode)]
1197
+ index: List[int] = np.load(index_filename)
1198
+ n_chunks = len(index)
1199
+ chunk_per_reader = int(np.ceil(n_chunks / n_readers))
1200
+ n_readers = int(np.ceil(n_chunks / chunk_per_reader))
1201
+
1202
+ start = 0
1203
+ readers = []
1204
+ for i in range(n_readers):
1205
+ end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
1206
+ r = _blocked_gzip_reader(filename, start, end, mode)
1207
+ readers.append(r)
1208
+ start = end
1209
+ return readers
1210
+
1211
+
1212
+ def block_reader(filename: Path) -> Iterable[str]:
1213
+ root, pattern = str(filename)[:-1].split("[", 1)
1214
+ assert root.endswith(".gz"), "Can only read block of a .gz file for now."
1215
+
1216
+ ii, nn = pattern.strip().split("/")
1217
+ i, n_readers = int(ii), int(nn)
1218
+
1219
+ index_filename = root + ".index"
1220
+ assert os.path.exists(
1221
+ index_filename
1222
+ ), f"Index {index_filename} not found for {filename}"
1223
+ index: List[int] = np.load(index_filename)
1224
+ n_chunks = len(index)
1225
+ chunk_per_reader = int(np.ceil(n_chunks / n_readers))
1226
+ n_readers = int(np.ceil(n_chunks / chunk_per_reader))
1227
+ # I'm not sure how to handle the case where there are fewer readers than expected.
1228
+ # Currently we return empty readers.
1229
+
1230
+ start = 0
1231
+ if i > 0:
1232
+ start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
1233
+ end = index[min(i * chunk_per_reader, n_chunks - 1)]
1234
+ return _blocked_gzip_reader(root, start, end, mode="t")
1235
+
1236
+
1237
+ def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
1238
+ handle = gzip.open(filename, "r" + mode)
1239
+ handle.seek(start)
1240
+ try:
1241
+ while handle.tell() < end:
1242
+ line = handle.readline()
1243
+ if not line:
1244
+ break
1245
+ yield line
1246
+ finally:
1247
+ handle.close()
1248
+
1249
+
1250
+ class BlockedGzipWriter(MultiFile):
1251
+ """Writes a gzip file which can be read block by block.
1252
+
1253
+ Decreasing the block size may hurt compression, but provides more split points.
1254
+ """
1255
+
1256
+ def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
1257
+ assert "w" in mode
1258
+ self.filename = Path(filename)
1259
+ self.index: List[int] = []
1260
+ self.zipfile: Optional[gzip.GzipFile] = None
1261
+ super().__init__([], mode, block_size)
1262
+
1263
+ def _open_next_handle(self) -> bool:
1264
+ """Here we never actually close/open handles,
1265
+ we just write the end-of-block sequence."""
1266
+ if not self.current_handle:
1267
+ mode = self.mode + "t"
1268
+ self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
1269
+ assert isinstance(self.current_handle.buffer, gzip.GzipFile)
1270
+ self.zipfile = self.current_handle.buffer
1271
+ return True
1272
+
1273
+ # Use Z_FULL_FLUSH to allow random access:
1274
+ # https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
1275
+ self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
1276
+ self.index.append(self.current_handle.tell())
1277
+ self.current_block_size = 0
1278
+ return True
1279
+
1280
+ def flush(self):
1281
+ assert self.current_handle is not None
1282
+ self.current_handle.flush()
1283
+
1284
+ def close(self):
1285
+ if self.current_handle is None:
1286
+ return
1287
+ self.current_handle.flush()
1288
+ self.index.append(self.current_handle.tell())
1289
+ self.current_handle.close()
1290
+ self.current_handle = None
1291
+ index = np.array(self.index, dtype=np.uint64)
1292
+ with open(str(self.filename) + ".index", "wb") as o:
1293
+ np.save(o, index)
1294
+
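A round-trip sketch tying `BlockedGzipWriter` and `block_reader` together (the paths and the 8-way split are illustrative): writing through `open_write` produces the `.index` file next to the `.gz`, and the `name.gz[i/n]` syntax reads one slice of it.

docs = [{"id": i, "text": "x" * 100} for i in range(10_000)]
with open_write("corpus.json.gz") as o:  # .gz -> BlockedGzipWriter, also writes corpus.json.gz.index
    for doc in docs:
        print(json.dumps(doc), file=o)

first_slice = list(open_read("corpus.json.gz[0/8]"))  # only the lines of the first block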
1295
+
1296
+ def grouper(iterable, n):
1297
+ group = []
1298
+ for x in iterable:
1299
+ group.append(x)
1300
+ if len(group) == n:
1301
+ yield group
1302
+ group = []
1303
+ if group:
1304
+ yield group
1305
+
1306
+
1307
+ PROCESS = psutil.Process()
1308
+
1309
+
1310
+ def mem_footprint_gb(pid=None):
1311
+ rss = PROCESS.memory_info().rss
1312
+ return rss / 1_000_000_000
1313
+
1314
+
1315
+ def _tmp(output: Path) -> Path:
1316
+ suffix = "".join(output.suffixes)
1317
+ suffix = ".tmp" + suffix
1318
+ prefix = output.name[: -len(suffix)]
1319
+ _, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
1320
+ return Path(tmp_path)
1321
+
1322
+
1323
+ @functools.lru_cache()
1324
+ def _tmp_dir() -> Path:
1325
+ job_id = os.environ.get("SLURM_JOB_ID")
1326
+ if job_id:
1327
+ return Path("/scratch/slurm_tmpdir") / job_id
1328
+
1329
+ checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
1330
+ if checkpoint.exists():
1331
+ tmp = checkpoint / "tmp"
1332
+ tmp.mkdir(exist_ok=True)
1333
+ return tmp
1334
+
1335
+ return Path("/tmp")
1336
+
1337
+
1338
+ if __name__ == "__main__":
1339
+ multiprocessing.set_start_method("fork")
1340
+ main(sys.argv[1:])
cc-multilingual-main/cc_net/build/lib/cc_net/minify.py ADDED
@@ -0,0 +1,304 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ import base64
8
+ import hashlib
9
+ import itertools
10
+ import urllib.parse
11
+ from pathlib import Path
12
+ from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
13
+
14
+ import numpy as np
15
+
16
+ from cc_net import jsonql
17
+ from cc_net.execution import get_executor
18
+ from cc_net.jsonql import mem_footprint_gb
19
+
20
+ HASH_SIZE = 4
21
+ HASH_TYPE = np.uint32
22
+
23
+ PUBLIC_FIELDS = ["url", "digest"]
24
+ COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
25
+ DATA = Path(__file__).parent.parent / "data"
26
+
27
+
28
+ # This is similar to the dedup methods but uses 32-bit hashes.
29
+ def _b2i(b: bytes) -> int:
30
+ return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
31
+
32
+
33
+ def _str_hash(s: str) -> int:
34
+ h = hashlib.sha1(bytes(s, encoding="utf-8"))
35
+ return _b2i(h.digest())
36
+
37
+
38
+ def get_hashes(lines: Iterable[str]) -> List[bytes]:
39
+ h = HASH_SIZE
40
+ return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
41
+
42
+
43
+ def encode_hashes(hashes: Iterable[bytes]) -> str:
44
+ return base64.b64encode(b"".join(hashes)).decode("ascii")
45
+
46
+
47
+ def encode_as_hashes(lines: Iterable[str]) -> str:
48
+ return encode_hashes(get_hashes(lines))
49
+
50
+
51
+ def decode_hashes(compact: str) -> List[bytes]:
52
+ all_hashes = base64.b64decode(compact)
53
+ res = []
54
+ assert len(all_hashes) % HASH_SIZE == 0
55
+ for i in range(len(all_hashes) // HASH_SIZE):
56
+ chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
57
+ res.append(chunk)
58
+
59
+ return res
60
+
61
+
62
+ def encode_line_ids(line_ids: Sequence[int]) -> str:
63
+ arr = np.array(line_ids, dtype="<u2")
64
+ return base64.b64encode(arr.tobytes()).decode("ascii")
65
+
66
+
67
+ def decode_line_ids(compact: str) -> List[int]:
68
+ ids_bytes = bytearray(base64.b64decode(compact))
69
+ return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
70
+
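A round-trip sketch for the line-id codec (the values are illustrative). Ids are packed as little-endian 16-bit integers, which is what keeps the minified documents small:

compact = encode_line_ids([0, 3, 7])               # base64 string of 16-bit ids
assert list(decode_line_ids(compact)) == [0, 3, 7]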
71
+
72
+ def get_doc_key(digest: str) -> int:
73
+ assert digest.startswith("sha1:")
74
+ h = base64.b32decode(digest[5:])
75
+ return _b2i(h[:HASH_SIZE])
76
+
77
+
78
+ class Minifier(jsonql.Transformer):
79
+ ready = True
80
+
81
+ def __init__(self):
82
+ self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
83
+
84
+ def do(self, doc: dict) -> Optional[dict]:
85
+ line_ids: List[int] = doc.pop("line_ids")
86
+ fields = self.fields
87
+ keys = list(doc.keys())
88
+ for k in keys:
89
+ if k not in fields:
90
+ doc.pop(k, None)
91
+ p = doc.get("perplexity", 0)
92
+ doc["line_ids"] = encode_line_ids(line_ids)
93
+ if p:
94
+ doc["perplexity"] = round(p, 1)
95
+ s = doc.get("language_score", 0)
96
+ if s:
97
+ doc["language_score"] = round(s, 2)
98
+ return doc
99
+
100
+
101
+ class MetadataFetcher(jsonql.Transformer):
102
+ """Reads documents from a CC snapshot and joins precomputed metadata.
103
+
104
+ CC snapshots are split into segments. Each segment is 64 MB long.
105
+ The metadata must also be stored in segments of the same size and names.
106
+ """
107
+
108
+ def __init__(self, folder: Union[Path, str]):
109
+ self.ready = True
110
+ self.metadata: Dict[int, dict] = {}
111
+
112
+ self._segments: Set[str] = set()
113
+ self.read_doc = 0
114
+ self.missed_doc = 0
115
+ self.missed_par = 0
116
+ self.processed_par = 0
117
+
118
+ if isinstance(folder, str):
119
+ # detect path passed as string
120
+ if urllib.parse.urlparse(folder).scheme == "":
121
+ folder = Path(folder)
122
+ assert folder.exists(), f"Metadata folder not found: {folder}"
123
+
124
+ self.folder = folder
125
+ self.segment: str = ""
126
+ self.segments_read_twice = 0
127
+
128
+ def meta_file(self, segment: str) -> str:
129
+ file_name = segment.split("/")[-1]
130
+ assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
131
+ if isinstance(self.folder, str):
132
+ return urllib.parse.urljoin(
133
+ self.folder, file_name.replace(".warc.wet", ".json")
134
+ )
135
+ meta_file = self.folder / file_name.replace(".warc.wet", ".json")
136
+ assert (
137
+ meta_file.exists()
138
+ ), f"Couldn't find metadata file for segment {segment} at {meta_file}"
139
+ return str(meta_file)
140
+
141
+ def fetch_metadata(self, segment: str) -> None:
142
+ meta_file = self.meta_file(segment)
143
+ k = get_doc_key
144
+ self.metadata = {}
145
+ collision = 0
146
+ for m in jsonql.read_jsons(meta_file):
147
+ key = k(m["digest"])
148
+ if key in self.metadata:
149
+ collision += 1
150
+ self.metadata[key] = m
151
+
152
+ self.log(f"Loaded {len(self.metadata)} metadata entries from {meta_file}")
153
+ if collision > 0:
154
+ self._logger.warning(f"Found {collision} collisions !")
155
+
156
+ self.segment = segment
157
+ if segment in self._segments:
158
+ self.log("Cache miss")
159
+ self.segments_read_twice += 1
160
+ self._segments.add(segment)
161
+
162
+ def do(self, doc: dict) -> Optional[dict]:
163
+ if self.segment != doc["cc_segment"]:
164
+ self.fetch_metadata(doc["cc_segment"])
165
+ digest = doc["digest"]
166
+ key = get_doc_key(digest)
167
+ if key not in self.metadata:
168
+ return None
169
+
170
+ metadata = self.metadata.pop(key)
171
+ return self.clean(metadata, doc)
172
+
173
+ def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
174
+ line_ids = decode_line_ids(metadata.pop("line_ids"))
175
+ lines = full_doc["raw_content"].split("\n")
176
+ cleaned = []
177
+ for l in line_ids:
178
+ if l >= len(lines) or l < 0:
179
+ self.missed_par += 1
180
+ continue
181
+ cleaned.append(lines[l])
182
+
183
+ self.processed_par += len(line_ids)
184
+ if not cleaned:
185
+ self.missed_doc += 1
186
+ return None
187
+
188
+ full_doc["raw_content"] = "\n".join(cleaned)
189
+ full_doc["original_nlines"] = full_doc["nlines"]
190
+ full_doc["original_length"] = full_doc["length"]
191
+ full_doc["nlines"] = len(cleaned)
192
+ full_doc["length"] = len(full_doc["raw_content"])
193
+ for key, value in metadata.items():
194
+ full_doc[key] = value
195
+ return full_doc
196
+
197
+ def summary(self) -> List[str]:
198
+ summ = super().summary()
199
+ mem = mem_footprint_gb()
200
+ len_cache = len(self.metadata)
201
+ summ.append(
202
+ f"Read {self.read_doc:_}, storing {len_cache:_} docs in {mem:.1f}g."
203
+ )
204
+ if self.missed_doc:
205
+ r = self.missed_doc / self.processed
206
+ summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
207
+
208
+ if self.missed_par:
209
+ r = self.missed_par / self.processed
210
+ summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
211
+ return summ
212
+
213
+
214
+ def _expand_files(files: List[Path]) -> List[Path]:
215
+ if len(files) == 1 and files[0].is_dir():
216
+ folder = files[0]
217
+ files = sorted(folder.glob("*.json.gz"))
218
+ print(f"Found {len(files)} files under {folder}/*.json.gz")
219
+ assert files, "No files found"
220
+ return files
221
+
222
+
223
+ def minify_file(file: Path, output: Path) -> str:
224
+ """Minify the given file."""
225
+ jsonql.run_pipes(Minifier(), file=file, output=output)
226
+ return f"Minified {output}"
227
+
228
+
229
+ def minify(
230
+ files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
231
+ ):
232
+ """Minify all the files in the given folder."""
233
+ files = _expand_files(files)
234
+ output_dir.mkdir(exist_ok=True)
235
+ with open(output_dir / "files.txt", "w") as o:
236
+ for f in files:
237
+ print(f.name, file=o)
238
+ outputs = [output_dir / f.name for f in files]
239
+ ex = get_executor(
240
+ "minify",
241
+ output_dir / "logs",
242
+ execution,
243
+ timeout_hour=2,
244
+ cpus=1,
245
+ task_parallelism=parallelism,
246
+ )
247
+ ex(minify_file, files, outputs)
248
+
249
+
250
+ def fetch_metadata_file(
251
+ file: Union[Path, str],
252
+ metadata_dir: Union[Path, str],
253
+ output: Path,
254
+ cache_dir: Path = None,
255
+ ):
256
+ unminifier = MetadataFetcher(metadata_dir)
257
+ tmp = output.with_name("tmp." + output.name)
258
+ jsonql.run_pipes(unminifier, file=file, output=tmp)
259
+ tmp.rename(output)
260
+ return f"Fetched metadata for {file}. Results at {output}."
261
+
262
+
263
+ def fetch_metadata(
264
+ files: List[str],
265
+ metadata_dir: Union[Path, str],
266
+ output_dir: Path,
267
+ execution: str = "mp",
268
+ parallelism: int = -1,
269
+ cache_dir: Path = None,
270
+ ):
271
+ if len(files) == 1 and Path(files[0]).is_dir():
272
+ folder = Path(files[0])
273
+ files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
274
+ print(f"Found {len(files)} files under {folder}/*.json.gz")
275
+
276
+ assert len(files) > 0, "No files given."
277
+ output_dir.mkdir(exist_ok=True)
278
+
279
+ outputs = [output_dir / str(f).split("/")[-1] for f in files]
280
+ if cache_dir is None:
281
+ cache_dir = output_dir / "wet_cache"
282
+ cache_dir.mkdir(exist_ok=True)
283
+ if str(cache_dir) == "none":
284
+ cache_dir = None
285
+ files = [f for f, o in zip(files, outputs) if not o.exists()]
286
+ outputs = [o for o in outputs if not o.exists()]
287
+ if not files:
288
+ return
289
+ ex = get_executor(
290
+ "unminify",
291
+ output_dir / "logs",
292
+ execution,
293
+ timeout_hour=8,
294
+ cpus=1,
295
+ task_parallelism=parallelism,
296
+ mem_gb=32,
297
+ )
298
+ ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
299
+
300
+
301
+ if __name__ == "__main__":
302
+ import func_argparse
303
+
304
+ func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
cc-multilingual-main/cc_net/build/lib/cc_net/split_by_lang.py ADDED
@@ -0,0 +1,151 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+
7
+ import argparse
8
+ import collections
9
+ from pathlib import Path
10
+ from typing import Dict, Optional
11
+
12
+ import fasttext # type: ignore
13
+
14
+ from cc_net import jsonql
15
+
16
+
17
+ def get_args():
18
+ parser = argparse.ArgumentParser(
19
+ description="Read a list of json files, classify their language and split them.",
20
+ parents=[jsonql.io_parser()],
21
+ )
22
+ parser.add_argument("--pattern", type=str)
23
+ parser.add_argument("--field", type=str, default="raw_content")
24
+ parser.add_argument("--threshold", type=float, default=0)
25
+ parser.add_argument("--model", type=str, required=True)
26
+ parser.add_argument("--out_field", type=str, default="language")
27
+ parser.add_argument("--top", type=int, default=1)
28
+ return vars(parser.parse_args())
29
+
30
+
31
+ def predict(model, text: str, k: int = 1):
32
+ labels, scores = model.predict(text, k=k)
33
+ labels = [l.replace("__label__", "") for l in labels]
34
+ return labels, scores
35
+
36
+
37
+ def avg_predict(model, text):
38
+ # Overall this gives the same results as predict(model, text.replace("\n", ""))
39
+ text = text.split("\n")
40
+ text_len = sum(len(line) for line in text)
41
+ if text_len == 0:
42
+ return None, 0
43
+ scores = [predict(model, line) for line in text]
44
+ scores_by_label: Dict[str, float] = collections.defaultdict(float)
45
+ for (label, score), line in zip(scores, text):
46
+ scores_by_label[label] += score * len(line)
47
+
48
+ label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
49
+ return label, score / text_len
50
+
51
+
52
+ class Classifier(jsonql.Transformer):
53
+ def __init__(
54
+ self,
55
+ model: Path,
56
+ field: str,
57
+ out_field: str,
58
+ threshold: float = 0,
59
+ top: int = 1,
60
+ language: str = None,
61
+ rounding: int = 2,
62
+ ):
63
+ super().__init__()
64
+ self.model = model
65
+ assert model.exists(), f"Model {model} doesn't exist."
66
+ self.field = field
67
+ self.out_field = out_field
68
+ self.threshold = threshold
69
+ self.top = top
70
+ self.language = language
71
+ self.rounding = rounding
72
+ # Fasttext model is a C object and can't be pickled
73
+ self.fasttext_model: fasttext._FastText = None
74
+ self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
75
+ self.cnt: Dict[str, int] = {}
76
+
77
+ def _prepare(self):
78
+ self.log(f"Loading {self.model}")
79
+ self.fasttext_model = fasttext.load_model(str(self.model))
80
+
81
+ def predict(self, text):
82
+ return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
83
+
84
+ def do(self, doc: dict) -> Optional[dict]:
85
+ text = doc.get(self.field, None)
86
+ if not text:
87
+ return None
88
+
89
+ if self.language and doc.get("language") != self.language:
90
+ self.n_ignored += 1
91
+ return doc
92
+
93
+ self.n_doc += 1
94
+ labels, scores = self.predict(text)
95
+ scores.round(self.rounding, out=scores)
96
+ for l in labels:
97
+ self.cnt[l] = self.cnt.get(l, 0) + 1
98
+
99
+ if self.top == 1:
100
+ existing_label = doc.get(self.out_field, None)
101
+ if existing_label and labels[0] != existing_label:
102
+ self.n_disagreement += 1
103
+
104
+ if all(s < self.threshold for s in scores):
105
+ return None
106
+
107
+ self.n_accepted += 1
108
+ if self.top == 1:
109
+ doc[self.out_field] = labels[0]
110
+ doc[self.out_field + "_score"] = scores[0]
111
+ else:
112
+ doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
113
+ return doc
114
+
115
+ def summary(self):
116
+ n_doc, n_accepted, n_disagreement, cnt, out_field = (
117
+ self.n_doc,
118
+ self.n_accepted,
119
+ self.n_disagreement,
120
+ self.cnt,
121
+ self.out_field,
122
+ )
123
+ summ = super().summary()
124
+ if self.threshold > 0:
125
+ ratio = n_accepted / n_doc if n_doc else 0
126
+ summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
127
+ summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
128
+
129
+ disagreement = n_disagreement / n_doc if n_doc else 0
130
+ if disagreement:
131
+ summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
132
+ return summ
133
+
134
+ def __repr__(self):
135
+ return f"Classifier({self.model})"
136
+
137
+
138
+ def classify_and_split(file, output, pattern, **kwargs):
139
+ classifier = Classifier(**kwargs)
140
+ splitter = jsonql.split(pattern)
141
+ jsonql.run_pipes(classifier, splitter, file=file, output=output)
142
+
143
+
144
+ if __name__ == "__main__":
145
+ args = get_args()
146
+ pattern = args.get("pattern")
147
+ if pattern:
148
+ classify_and_split(**args)
149
+ else:
150
+ args.pop("pattern")
151
+ jsonql.run_pipe(Classifier, args)
cc-multilingual-main/cc_net/build/lib/cc_net/tokenizer.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ import time
8
+ from typing import Dict, Optional
9
+
10
+ import sacremoses # type: ignore
11
+
12
+ from cc_net import jsonql, text_normalizer
13
+
14
+
15
+ class RobustTokenizer(jsonql.Transformer):
16
+ """Moses tokenizer with the expected preprocessing."""
17
+
18
+ LANG_WITHOUT_ACCENT = {"en", "my"}
19
+
20
+ def __init__(self, lang: str):
21
+ super().__init__()
22
+ self.lang = lang
23
+ self.moses = sacremoses.MosesTokenizer(lang)
24
+ self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
25
+ self.ready = True
26
+
27
+ def do(self, text: str):
28
+ text = text_normalizer.normalize(
29
+ text, accent=self.rm_accent, case=False, numbers=False, punct=True
30
+ )
31
+ text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
32
+ return self.moses.tokenize(text, return_str=True, escape=False)
33
+
34
+
35
+ class DocTokenizer(jsonql.Transformer):
36
+ """Tokenize the text found in `field` and store the result in `output_field`."""
37
+
38
+ def __init__(
39
+ self,
40
+ field: str,
41
+ output_field: str = "tokenized",
42
+ language_field: str = "language",
43
+ ):
44
+ super().__init__()
45
+ self.field = field
46
+ self.output_field = output_field
47
+ self.language_field = language_field
48
+ self.n_docs = 0
49
+ self.tokenizers: Dict[str, RobustTokenizer] = {}
50
+
51
+ def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
52
+ cache = self.tokenizers
53
+ if lang in cache:
54
+ return cache[lang]
55
+ if lang in ("th", "zh", "ja"):
56
+ # TODO find a tokenizer for those languages
57
+ return None
58
+
59
+ cache[lang] = RobustTokenizer(lang)
60
+ return cache[lang]
61
+
62
+ def do(self, document):
63
+ lang = document[self.language_field]
64
+ tok = self.get_tokenizer(lang)
65
+ if not tok:
66
+ return document
67
+
68
+ self.n_docs += 1
69
+ lines = document[self.field].split("\n")
70
+ tokenized = "\n".join(tok(l) for l in lines)
71
+ document[self.output_field] = tokenized
72
+ return document
73
+
74
+ def summary(self):
75
+ delay = (time.time() - self.start_time) / 3600
76
+ speed = self.n_docs / delay
77
+ return [
78
+ f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
79
+ ]
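A usage sketch for `DocTokenizer` (the document is illustrative and `sacremoses` must be installed); entering the transformer with `with` prepares it before use:

from cc_net.tokenizer import DocTokenizer

doc = {"language": "en", "raw_content": "Hello world.\nSecond line."}
with DocTokenizer(field="raw_content") as tok:
    out = tok(doc)
print(out["tokenized"])  # Moses-tokenized text, one tokenized line per input line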
cc-multilingual-main/cc_net/cc_net/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
cc-multilingual-main/cc_net/cc_net/__init__.pyc ADDED
Binary file (105 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__main__.py ADDED
@@ -0,0 +1,18 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+
8
+ import func_argparse
9
+
10
+ import cc_net.mine
11
+
12
+
13
+ def main():
14
+ func_argparse.parse_and_call(cc_net.mine.get_main_parser())
15
+
16
+
17
+ if __name__ == "__main__":
18
+ main()
cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (142 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (136 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (444 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-312.pyc ADDED
Binary file (538 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-38.pyc ADDED
Binary file (391 Bytes). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-38.pyc ADDED
Binary file (14 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-310.pyc ADDED
Binary file (6.43 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-38.pyc ADDED
Binary file (6.25 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-310.pyc ADDED
Binary file (9.37 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-38.pyc ADDED
Binary file (9.41 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-310.pyc ADDED
Binary file (40.8 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-38.pyc ADDED
Binary file (40.7 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-38.pyc ADDED
Binary file (19.7 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-310.pyc ADDED
Binary file (9.95 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-38.pyc ADDED
Binary file (9.98 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-38.pyc ADDED
Binary file (10.7 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-310.pyc ADDED
Binary file (8.96 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-38.pyc ADDED
Binary file (8.82 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-310.pyc ADDED
Binary file (3.56 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-38.pyc ADDED
Binary file (3.49 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-310.pyc ADDED
Binary file (5.32 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-38.pyc ADDED
Binary file (5.23 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-38.pyc ADDED
Binary file (4.29 kB). View file
 
cc-multilingual-main/cc_net/cc_net/break.ipynb ADDED
File without changes
cc-multilingual-main/cc_net/cc_net/data/cutoff.csv ADDED
@@ -0,0 +1,101 @@
1
+ ,de,it,fr,nl,pl,pt,es,no,da,id,lt,fi,en,hu,ro,ko,ar,bn,fa,ru,uk,ml,my,lv,is,ca,ne,et,hy,ja,hr,hi,az,el,cs,bg,he,zh,ka,km,gu,mk,kn,mr,af,mn,kk,be
2
+ 0,0,0,0,10,0,0,0,0,0,0,0,10,0,0,0,20,0,0,0,0,0,0,10,10,10,0,10,0,0,0,0,0,0,0,0,0,0,10,0,10,10,10,10,10,10,0,0,10
3
+ 1,150,100,70,170,50,70,70,170,160,210,70,400,160,30,70,20,100,70,110,70,40,480,550,430,410,80,330,470,210,400,290,190,130,210,270,120,400,600,230,410,510,190,730,460,130,240,50,170
4
+ 2,170,110,90,200,70,80,90,210,190,260,90,490,180,30,80,30,180,80,150,90,50,740,840,570,490,90,420,630,300,540,360,220,200,250,350,180,520,750,360,460,580,250,1020,630,190,300,110,250
5
+ 3,190,120,90,230,70,90,90,230,210,300,100,570,200,40,90,30,220,90,180,110,60,930,980,650,530,100,470,720,360,620,400,240,240,270,410,220,620,840,420,490,630,280,1170,720,220,330,150,300
6
+ 4,200,130,100,240,80,90,100,250,220,320,110,620,210,40,100,30,250,100,190,110,60,1080,1140,710,560,110,510,780,400,670,440,260,270,280,450,230,690,900,480,530,670,300,1280,790,260,360,190,340
7
+ 5,210,130,100,260,90,100,110,260,230,340,110,670,220,40,110,30,260,100,210,120,70,1210,1260,760,580,120,540,830,430,710,470,270,300,290,480,250,760,950,520,550,690,320,1370,840,290,380,210,370
8
+ 6,220,140,110,270,90,100,110,270,240,350,120,700,230,40,110,30,280,110,220,130,70,1310,1390,790,600,120,570,870,450,750,490,280,320,300,510,270,810,990,560,560,720,340,1440,880,310,400,240,390
9
+ 7,230,140,110,280,100,110,120,280,250,370,120,740,230,50,120,30,290,110,230,140,70,1400,1500,820,620,130,590,900,480,770,510,300,340,310,540,290,850,1030,580,570,740,350,1500,920,330,410,270,410
10
+ 8,230,150,110,290,100,110,120,290,260,380,130,770,240,50,120,40,310,120,240,140,80,1470,1590,840,640,130,610,930,500,800,530,310,360,320,560,300,880,1060,610,600,760,370,1550,950,350,430,280,430
11
+ 9,240,150,120,300,100,110,120,300,270,400,130,790,250,50,120,40,320,120,240,150,80,1540,1660,860,650,140,630,960,520,820,540,310,380,330,580,310,910,1090,630,610,780,380,1600,990,370,450,310,450
12
+ 10,250,160,120,310,110,120,130,310,270,410,140,810,250,50,130,40,330,130,250,150,90,1600,1740,880,660,140,650,980,530,840,550,320,390,340,600,320,940,1110,650,620,800,390,1640,1010,380,460,330,470
13
+ 11,250,160,120,310,110,120,130,310,280,420,140,830,260,50,130,60,340,130,260,150,90,1650,1810,900,680,150,660,1000,550,860,570,330,410,350,610,330,970,1140,670,640,820,400,1680,1040,390,480,350,480
14
+ 12,260,160,130,320,110,120,130,320,290,430,140,850,260,50,130,70,350,130,270,160,90,1700,1870,920,690,150,680,1020,570,880,580,340,420,350,630,340,990,1160,690,660,840,410,1720,1060,400,490,370,500
15
+ 13,270,170,130,330,120,130,140,330,290,440,150,870,270,60,140,80,360,140,270,160,90,1750,1930,930,700,150,690,1030,580,890,590,350,440,360,640,340,1010,1180,710,680,860,420,1760,1090,410,500,380,510
16
+ 14,270,170,130,340,120,130,140,340,300,450,150,890,270,60,140,110,370,140,280,170,100,1800,1990,950,710,160,700,1050,590,910,600,360,450,370,650,350,1030,1200,730,700,880,430,1790,1110,420,510,400,520
17
+ 15,280,170,140,340,120,130,140,340,300,460,150,900,280,60,140,110,380,140,290,170,100,1850,2040,960,720,160,720,1070,600,920,610,360,460,370,670,360,1050,1220,740,710,890,430,1820,1130,430,520,420,540
18
+ 16,280,180,140,350,120,130,140,350,310,470,160,920,280,60,150,120,380,150,290,170,100,1890,2080,980,730,160,730,1080,620,940,620,370,470,380,680,360,1070,1240,750,740,910,440,1850,1150,440,530,440,550
19
+ 17,290,180,140,360,130,140,150,350,320,480,160,940,280,60,150,120,390,150,300,180,110,1940,2120,990,740,170,740,1100,630,950,630,380,480,380,690,370,1090,1260,770,760,920,450,1880,1170,450,550,450,560
20
+ 18,300,180,150,360,130,140,150,360,320,490,160,950,290,60,150,160,400,150,300,180,110,1980,2170,1000,750,170,750,1110,640,970,640,390,490,390,700,380,1100,1270,780,770,940,460,1910,1190,450,560,470,570
21
+ 19,300,190,150,370,130,140,150,370,330,500,160,970,290,70,150,230,410,160,310,180,110,2030,2200,1010,760,170,760,1130,650,980,640,390,500,390,710,380,1120,1290,790,790,950,460,1940,1210,460,570,480,580
22
+ 20,310,190,150,370,130,140,150,370,330,510,170,980,300,70,160,330,420,160,310,180,110,2070,2240,1030,770,180,770,1140,660,990,650,400,510,400,720,390,1140,1310,800,810,970,470,1970,1220,470,580,490,590
23
+ 21,310,190,150,380,140,150,160,380,340,520,170,990,300,70,160,370,420,160,320,190,120,2110,2270,1040,770,180,780,1160,670,1010,660,410,520,400,740,400,1150,1320,820,830,980,480,1990,1240,480,590,510,600
24
+ 22,320,200,160,390,140,150,160,380,340,530,170,1010,300,70,160,450,430,170,330,190,120,2160,2300,1050,780,180,790,1170,670,1020,670,410,530,410,750,400,1170,1340,830,850,1000,480,2020,1260,490,600,520,610
25
+ 23,320,200,160,390,140,150,160,390,350,540,170,1020,310,70,160,600,440,170,330,190,120,2190,2340,1060,790,190,800,1180,680,1030,680,420,540,420,760,410,1180,1350,840,870,1010,490,2040,1270,490,610,530,620
26
+ 24,330,200,160,400,140,150,160,400,350,540,170,1030,310,70,170,670,450,170,340,200,130,2230,2360,1070,800,190,810,1190,690,1040,680,430,550,420,770,410,1200,1370,850,890,1020,500,2060,1290,500,620,550,630
27
+ 25,340,210,170,400,150,160,170,400,360,550,180,1050,320,80,170,740,460,170,340,200,130,2270,2390,1080,810,190,820,1210,700,1050,690,440,560,430,780,420,1210,1380,860,910,1040,500,2090,1300,510,630,560,640
28
+ 26,340,210,170,410,150,160,170,410,360,560,180,1060,320,80,170,790,460,180,350,200,130,2300,2420,1090,820,190,830,1220,710,1060,700,440,570,430,780,420,1220,1400,870,930,1050,510,2110,1320,510,640,570,640
29
+ 27,350,210,170,420,150,160,170,410,370,570,180,1070,320,80,170,840,470,180,350,200,130,2340,2450,1100,830,200,840,1230,720,1080,700,450,580,440,790,430,1240,1410,880,960,1070,510,2140,1330,520,650,580,650
30
+ 28,350,220,180,420,150,160,170,420,370,580,180,1090,330,80,180,840,480,180,360,210,140,2370,2470,1110,840,200,850,1240,730,1090,710,460,580,440,800,440,1250,1430,890,990,1080,520,2160,1350,530,660,590,660
31
+ 29,360,220,180,430,160,160,180,430,380,590,190,1100,330,80,180,890,490,190,370,210,140,2400,2500,1120,850,200,860,1250,740,1100,720,470,590,450,810,440,1270,1440,900,1010,1100,520,2180,1370,530,670,600,670
32
+ 30,370,220,180,430,160,170,180,430,380,600,190,1110,340,80,180,920,490,190,370,210,140,2430,2530,1130,850,210,860,1270,750,1110,720,480,600,450,820,450,1280,1460,910,1040,1110,530,2210,1380,540,680,610,670
33
+ 31,370,230,190,440,160,170,180,440,390,610,190,1120,340,80,180,920,500,190,380,220,140,2470,2550,1140,860,210,870,1280,750,1120,730,480,610,460,830,460,1290,1470,920,1070,1120,540,2230,1400,550,690,620,680
34
+ 32,380,230,190,450,160,170,180,440,390,620,190,1140,350,90,180,920,510,200,390,220,150,2510,2570,1150,870,210,880,1290,760,1130,740,490,620,460,840,460,1310,1490,930,1100,1140,540,2250,1410,550,700,630,690
35
+ 33,380,230,190,450,170,170,190,450,400,630,190,1150,350,90,190,940,520,200,390,220,150,2540,2590,1160,880,220,890,1300,770,1140,740,500,630,470,850,470,1320,1500,940,1120,1150,550,2270,1430,560,710,650,700
36
+ 34,390,240,200,460,170,180,190,460,410,640,200,1160,350,90,190,940,530,200,400,220,150,2570,2610,1170,890,220,900,1310,780,1150,750,510,640,470,860,470,1330,1520,960,1150,1160,550,2290,1440,570,720,660,700
37
+ 35,400,240,200,460,170,180,190,460,410,640,200,1170,360,90,190,940,530,210,410,230,150,2590,2630,1180,900,220,910,1320,790,1160,760,520,650,480,870,480,1350,1530,970,1180,1180,560,2310,1460,580,730,670,710
38
+ 36,400,240,200,470,170,180,190,470,420,660,200,1180,360,90,190,1010,540,210,410,230,160,2620,2650,1190,910,230,920,1330,800,1170,760,520,660,480,880,490,1360,1540,980,1210,1190,560,2330,1470,580,740,680,720
39
+ 37,410,250,210,480,180,190,200,480,420,670,200,1200,370,90,200,1010,550,210,420,230,160,2650,2660,1200,920,230,930,1340,810,1180,770,530,670,490,890,490,1370,1560,990,1240,1200,570,2350,1490,590,750,690,730
40
+ 38,410,250,210,480,180,190,200,480,430,680,210,1210,370,100,200,1020,560,210,430,230,160,2680,2680,1210,930,230,930,1350,820,1190,770,540,680,500,900,500,1390,1570,1000,1270,1220,580,2370,1500,600,760,700,730
41
+ 39,420,260,210,490,180,190,200,490,440,690,210,1220,380,100,200,1020,570,220,440,240,160,2710,2700,1220,930,240,940,1360,830,1200,780,550,690,500,910,510,1400,1590,1010,1300,1230,580,2390,1520,600,770,710,740
42
+ 40,430,260,220,490,190,190,210,500,440,700,210,1230,380,100,200,1020,570,220,440,240,170,2740,2720,1240,940,240,950,1370,840,1210,790,560,700,510,920,510,1410,1610,1020,1330,1250,590,2410,1540,610,780,720,750
43
+ 41,430,260,220,500,190,200,210,500,450,710,210,1240,390,100,210,1050,580,220,450,240,170,2770,2740,1250,950,240,960,1380,850,1230,790,570,710,510,930,520,1430,1620,1030,1360,1260,600,2430,1550,620,790,730,760
44
+ 42,440,270,220,510,190,200,210,510,450,720,210,1260,390,100,210,1050,590,230,460,250,170,2800,2760,1260,960,250,970,1390,860,1240,800,580,720,520,940,530,1440,1640,1040,1400,1270,600,2450,1570,630,800,740,770
45
+ 43,450,270,230,510,190,200,220,520,460,730,220,1270,400,110,210,1050,600,230,470,250,170,2820,2770,1270,970,250,980,1410,860,1250,800,590,730,530,940,530,1450,1650,1050,1430,1290,610,2470,1580,630,810,750,770
46
+ 44,450,280,230,520,200,210,220,530,470,740,220,1280,400,110,220,1050,610,230,480,250,180,2840,2790,1280,980,250,980,1420,870,1260,810,600,740,530,950,540,1470,1670,1070,1470,1300,610,2490,1600,640,820,750,780
47
+ 45,460,280,240,530,200,210,220,530,480,750,220,1290,410,110,220,1180,620,240,490,260,180,2870,2800,1290,990,260,990,1430,890,1270,820,610,760,540,970,550,1480,1690,1080,1510,1320,620,2510,1610,650,830,770,790
48
+ 46,470,280,240,530,200,210,230,540,480,760,220,1310,420,110,220,1180,630,240,500,260,180,2900,2820,1300,1000,260,1000,1440,900,1280,820,620,770,550,980,550,1500,1700,1090,1550,1330,630,2530,1630,650,840,780,790
49
+ 47,470,290,250,540,210,220,230,550,490,780,230,1320,420,110,220,1260,640,250,510,260,180,2930,2840,1310,1010,270,1010,1450,910,1290,830,630,780,550,990,560,1510,1720,1100,1580,1350,630,2540,1640,660,850,790,800
50
+ 48,480,290,250,550,210,220,230,560,500,790,230,1330,430,120,230,1410,650,250,520,270,190,2950,2850,1320,1020,270,1020,1460,920,1300,840,640,790,560,1000,570,1530,1740,1120,1620,1360,640,2570,1660,670,860,800,810
51
+ 49,490,300,260,560,210,220,240,570,500,800,230,1340,440,120,230,1430,660,250,530,270,190,2970,2870,1330,1030,270,1030,1470,930,1310,840,650,800,570,1010,580,1540,1750,1130,1650,1370,650,2580,1670,680,880,810,820
52
+ 50,500,300,260,560,220,230,240,570,510,810,230,1360,440,120,230,1540,670,260,550,270,190,3000,2880,1350,1050,280,1040,1480,940,1330,850,660,820,580,1020,590,1560,1770,1140,1690,1390,660,2600,1690,680,890,820,830
53
+ 51,500,310,270,570,220,230,250,580,520,830,240,1370,450,120,240,1560,680,260,560,280,200,3020,2900,1360,1060,280,1050,1500,950,1340,850,670,830,580,1030,600,1570,1790,1160,1730,1410,660,2620,1710,690,900,830,830
54
+ 52,510,310,270,580,220,230,250,590,530,840,240,1380,460,120,240,1610,690,270,570,280,200,3040,2910,1370,1070,290,1050,1510,970,1350,860,680,840,590,1040,600,1590,1810,1180,1780,1420,670,2640,1720,700,910,840,840
55
+ 53,520,320,280,590,230,240,250,600,540,850,240,1400,460,130,240,1700,700,270,580,280,200,3070,2930,1390,1090,290,1060,1520,980,1360,870,700,850,600,1050,610,1600,1830,1190,1820,1440,680,2660,1740,710,920,850,850
56
+ 54,530,320,280,590,230,240,260,610,550,870,250,1410,470,130,250,1730,710,280,600,290,200,3090,2940,1400,1100,300,1070,1540,990,1370,870,710,870,610,1060,620,1610,1840,1210,1870,1460,690,2680,1760,710,930,860,860
57
+ 55,540,330,290,600,240,250,260,620,560,880,250,1430,480,130,250,1800,720,280,620,290,210,3110,2960,1410,1120,300,1080,1550,1000,1380,880,720,880,620,1080,630,1630,1860,1220,1910,1480,700,2700,1780,720,950,870,860
58
+ 56,550,340,300,610,240,250,270,630,560,900,250,1440,490,130,250,1850,730,280,630,300,210,3130,2980,1430,1130,300,1090,1560,1020,1400,890,740,890,630,1090,640,1650,1880,1240,1960,1490,700,2720,1790,730,960,880,870
59
+ 57,560,340,300,620,240,260,270,640,570,910,250,1450,500,140,260,1950,750,290,650,300,210,3150,2990,1440,1150,310,1100,1580,1030,1410,890,750,900,640,1100,650,1660,1900,1260,2020,1510,710,2740,1810,740,970,900,880
60
+ 58,570,350,310,630,250,260,280,660,580,930,260,1470,510,140,260,1950,760,290,670,300,220,3170,3010,1450,1160,310,1110,1590,1050,1420,900,760,920,650,1110,660,1680,1920,1270,2070,1530,720,2760,1830,750,990,910,890
61
+ 59,580,350,320,640,250,260,280,670,590,950,260,1480,510,140,260,2040,770,300,680,310,220,3200,3020,1470,1170,320,1120,1600,1060,1440,910,780,930,660,1130,670,1700,1940,1290,2130,1550,730,2780,1850,760,1000,920,900
62
+ 60,590,360,330,650,260,270,290,680,600,970,270,1500,520,140,270,2130,790,300,700,310,220,3220,3040,1480,1190,330,1130,1620,1080,1450,910,790,950,670,1140,680,1710,1950,1310,2180,1570,740,2800,1870,770,1010,930,910
63
+ 61,600,370,340,660,260,270,290,690,620,980,270,1510,530,150,270,2210,800,310,720,320,230,3240,3050,1500,1210,330,1140,1630,1090,1460,920,810,960,680,1160,690,1730,1970,1330,2230,1580,750,2820,1890,780,1030,940,920
64
+ 62,610,370,340,670,270,280,300,710,630,1000,270,1530,550,150,280,2280,810,320,740,320,230,3260,3060,1510,1220,340,1160,1650,1110,1470,930,830,980,690,1170,700,1750,2000,1350,2290,1600,760,2840,1920,790,1040,950,920
65
+ 63,620,380,350,680,270,290,300,720,640,1020,280,1550,560,150,280,2290,830,320,770,330,230,3280,3080,1530,1240,340,1170,1660,1130,1490,930,840,990,700,1190,720,1770,2020,1370,2350,1620,770,2860,1940,800,1060,970,930
66
+ 64,630,390,360,690,280,290,310,730,650,1040,280,1560,570,160,290,2310,850,330,790,330,240,3300,3090,1540,1260,350,1180,1680,1150,1500,940,860,1010,720,1200,730,1790,2040,1400,2400,1650,780,2880,1960,810,1080,980,950
67
+ 65,640,400,370,700,280,300,320,750,670,1060,280,1580,580,160,290,2380,860,330,810,340,240,3320,3110,1560,1280,360,1190,1690,1170,1520,950,880,1020,730,1220,740,1810,2060,1420,2460,1670,790,2900,1990,820,1100,990,950
68
+ 66,660,410,380,710,290,300,320,770,680,1090,290,1600,600,160,300,2400,880,340,840,340,240,3340,3120,1580,1310,360,1200,1710,1190,1530,960,900,1030,740,1230,750,1830,2090,1450,2510,1690,810,2920,2010,830,1120,1010,960
69
+ 67,670,410,400,730,300,310,330,780,690,1110,290,1620,610,170,300,2420,900,350,870,350,250,3360,3140,1600,1330,370,1210,1730,1210,1550,960,920,1050,760,1250,770,1850,2120,1480,2570,1710,820,2950,2030,840,1130,1020,970
70
+ 68,680,420,410,740,300,320,340,800,710,1130,300,1640,630,170,310,2450,920,360,890,350,250,3380,3150,1620,1350,380,1230,1740,1230,1570,970,940,1070,770,1270,780,1870,2140,1510,2620,1740,830,2970,2050,850,1150,1030,980
71
+ 69,700,430,420,750,310,320,340,820,720,1150,300,1650,640,170,310,2490,940,360,920,360,260,3400,3170,1640,1380,390,1240,1760,1250,1580,980,960,1080,790,1290,790,1890,2170,1540,2670,1760,850,2990,2080,870,1180,1050,990
72
+ 70,710,440,430,770,320,330,350,840,740,1180,310,1670,660,180,320,2500,960,370,950,370,260,3420,3190,1660,1400,400,1250,1780,1270,1600,990,980,1100,800,1300,810,1910,2200,1570,2710,1790,860,3010,2100,880,1200,1060,1000
73
+ 71,730,460,450,780,320,340,360,860,760,1200,310,1690,680,180,320,2570,980,380,980,370,270,3440,3210,1680,1430,400,1270,1800,1290,1620,990,1010,1120,820,1320,830,1940,2230,1600,2760,1810,880,3030,2130,890,1220,1080,1010
74
+ 72,740,470,460,800,330,350,370,890,770,1230,320,1710,700,190,330,2660,1000,390,1010,380,270,3460,3220,1700,1460,410,1280,1820,1320,1640,1000,1030,1140,840,1340,840,1960,2260,1640,2810,1840,890,3060,2160,900,1250,1100,1030
75
+ 73,760,480,480,810,340,350,380,910,790,1260,330,1740,720,190,340,2730,1030,400,1050,390,280,3480,3240,1730,1490,420,1300,1840,1340,1660,1010,1060,1170,860,1360,860,1990,2290,1670,2860,1870,910,3080,2190,920,1270,1110,1040
76
+ 74,780,500,500,830,350,360,390,940,810,1290,330,1760,740,200,340,2850,1050,410,1080,400,280,3500,3260,1750,1520,430,1320,1860,1360,1680,1020,1080,1190,880,1390,880,2010,2320,1710,2900,1900,930,3110,2220,930,1300,1130,1050
77
+ 75,800,510,520,850,360,370,400,970,830,1320,340,1780,770,200,350,2930,1070,420,1110,400,290,3520,3280,1780,1560,440,1330,1880,1390,1710,1030,1110,1220,900,1410,900,2030,2350,1760,2940,1930,950,3130,2250,950,1330,1150,1060
78
+ 76,820,530,530,870,370,380,410,1000,860,1350,350,1810,800,210,360,2980,1100,440,1150,410,290,3540,3290,1800,1600,460,1350,1900,1420,1730,1040,1150,1240,920,1430,920,2060,2390,1800,2980,1940,970,3160,2280,970,1360,1170,1070
79
+ 77,840,550,550,890,380,390,430,1030,880,1380,350,1830,830,210,360,2990,1130,450,1190,420,300,3560,3310,1830,1630,470,1370,1930,1450,1750,1060,1180,1260,950,1460,940,2090,2420,1850,3020,1980,990,3190,2320,980,1400,1190,1090
80
+ 78,860,570,570,910,390,400,440,1070,910,1420,360,1860,860,220,370,3080,1160,470,1230,430,310,3580,3330,1860,1670,480,1390,1950,1480,1780,1070,1220,1290,970,1490,960,2120,2460,1900,3060,2010,1010,3210,2350,1000,1430,1210,1100
81
+ 79,890,590,600,930,400,410,450,1110,940,1460,370,1880,890,220,380,3170,1200,480,1270,440,310,3600,3350,1890,1720,500,1400,1980,1510,1810,1080,1260,1320,1000,1520,990,2150,2500,1950,3100,2050,1030,3240,2390,1030,1470,1230,1120
82
+ 80,920,620,630,960,410,420,470,1150,970,1500,380,1910,930,230,390,3210,1230,500,1320,450,320,3620,3370,1920,1760,510,1430,2010,1540,1830,1090,1300,1350,1030,1550,1010,2180,2540,2020,3140,2100,1060,3260,2420,1050,1510,1250,1130
83
+ 81,950,640,660,990,430,440,480,1200,1000,1540,390,1940,970,240,410,3290,1260,520,1370,460,330,3640,3390,1960,1810,520,1450,2040,1580,1860,1110,1340,1390,1060,1580,1040,2220,2590,2080,3180,2140,1090,3290,2460,1070,1540,1280,1150
84
+ 82,980,670,700,1010,440,450,500,1260,1030,1580,400,1980,1010,250,420,3370,1300,540,1430,480,340,3660,3410,1990,1860,540,1470,2070,1610,1890,1120,1390,1430,1100,1620,1070,2250,2630,2160,3230,2190,1120,3320,2500,1100,1590,1310,1170
85
+ 83,1010,710,740,1050,450,460,520,1320,1070,1630,410,2010,1060,260,430,3420,1340,570,1490,490,350,3680,3430,2030,1920,560,1500,2110,1640,1930,1140,1440,1470,1130,1650,1100,2290,2680,2230,3260,2240,1150,3350,2540,1130,1640,1340,1190
86
+ 84,1050,750,780,1080,470,480,540,1390,1110,1690,420,2050,1110,260,450,3460,1390,590,1550,510,360,3690,3460,2080,1990,580,1530,2150,1680,1960,1150,1490,1520,1170,1690,1140,2330,2730,2320,3310,2290,1190,3380,2590,1170,1690,1370,1210
87
+ 85,1100,800,830,1120,490,500,560,1470,1160,1740,440,2090,1170,270,470,3540,1440,630,1620,520,370,3710,3480,2120,2060,600,1560,2190,1730,2000,1170,1550,1570,1220,1740,1180,2370,2780,2420,3350,2350,1230,3410,2600,1200,1740,1400,1220
88
+ 86,1150,860,890,1170,500,520,590,1560,1210,1810,450,2130,1230,280,490,3620,1490,660,1680,540,390,3730,3510,2170,2140,630,1600,2230,1770,2040,1190,1620,1630,1260,1780,1220,2410,2830,2550,3390,2410,1280,3440,2660,1240,1790,1450,1250
89
+ 87,1200,930,960,1210,520,540,630,1660,1270,1870,460,2180,1300,300,510,3670,1550,700,1710,560,400,3750,3540,2220,2210,650,1630,2280,1820,2080,1210,1690,1680,1320,1830,1260,2460,2890,2700,3430,2490,1330,3470,2700,1280,1850,1490,1280
90
+ 88,1260,1010,1040,1260,550,570,660,1770,1340,1930,480,2240,1380,310,540,3670,1610,740,1740,590,420,3770,3560,2270,2280,690,1670,2320,1880,2130,1240,1760,1750,1380,1890,1320,2520,2950,2880,3470,2570,1380,3510,2750,1330,1920,1540,1310
91
+ 89,1340,1100,1140,1320,570,600,710,1870,1420,2010,500,2300,1460,330,570,3670,1680,780,1760,610,430,3790,3590,2340,2360,720,1720,2380,1930,2190,1260,1840,1820,1450,1950,1370,2580,3020,3080,3520,2640,1440,3540,2810,1380,2000,1590,1350
92
+ 90,1420,1220,1260,1390,610,630,760,1980,1510,2110,520,2370,1570,350,620,3670,1760,840,1800,640,460,3810,3610,2400,2430,760,1770,2440,2000,2250,1290,1920,1910,1530,2010,1440,2630,3090,3350,3560,2710,1510,3580,2870,1420,2090,1650,1390
93
+ 91,1530,1360,1390,1460,650,670,820,2100,1620,2220,550,2440,1700,380,680,3670,1850,900,1880,670,480,3830,3650,2470,2510,800,1830,2510,2070,2310,1320,2000,2000,1620,2090,1510,2700,3160,3750,3600,2800,1590,3610,2930,1470,2200,1710,1430
94
+ 92,1660,1440,1560,1550,690,720,900,2240,1730,2340,580,2530,1860,410,750,3670,1940,970,2000,710,510,3850,3680,2550,2590,850,1900,2590,2150,2380,1360,2100,2100,1730,2180,1600,2770,3240,3850,3650,2900,1670,3650,3010,1530,2310,1790,1490
95
+ 93,1830,1610,1760,1650,750,780,980,2380,1870,2480,610,2630,2060,450,830,3680,2050,1060,2110,760,550,3870,3710,2640,2650,920,1960,2680,2250,2450,1410,2200,2220,1850,2280,1700,2860,3320,3860,3690,2990,1760,3690,3050,1610,2420,1880,1540
96
+ 94,2060,1910,1870,1780,820,860,1090,2500,2010,2640,650,2750,2270,500,890,3680,2170,1170,2220,820,590,3890,3750,2740,2740,1000,2060,2780,2360,2530,1460,2320,2360,1990,2400,1820,2950,3400,3870,3730,3100,1870,3730,3140,1720,2560,1990,1620
97
+ 95,2350,2300,2030,1930,920,960,1260,2640,2200,2810,700,2880,2520,550,960,3750,2320,1300,2350,900,640,3910,3780,2850,2840,1100,2180,2900,2480,2640,1530,2480,2510,2130,2540,1980,3060,3490,3870,3770,3220,2020,3760,3230,1840,2730,2120,1720
98
+ 96,2690,2570,2390,2110,1050,1090,1520,2830,2450,3060,770,3050,2750,580,1010,3800,2480,1470,2540,990,710,3930,3820,2980,2950,1250,2330,3040,2620,2770,1620,2680,2690,2350,2710,2170,3180,3590,3880,3810,3340,2180,3810,3340,2010,2930,2280,1860
99
+ 97,3140,2790,2910,2360,1220,1290,1870,3090,2770,3420,850,3250,3260,680,1060,3860,2680,1710,2770,1130,790,3950,3860,3150,3090,1450,2530,3210,2790,2950,1740,2920,2890,2610,2860,2440,3320,3680,3880,3850,3480,2410,3850,3470,2230,3170,2500,2010
100
+ 98,3560,3270,3230,2670,1500,1610,2260,3370,3160,3840,990,3470,3460,830,1140,3920,2950,2070,2990,1370,950,3970,3910,3350,3280,1680,2800,3440,3030,3160,1930,3210,3150,2930,3100,2820,3500,3780,3880,3910,3610,2730,3900,3620,2590,3400,2820,2310
101
+ 99,3560,3660,3520,3150,1880,2290,2540,3630,3590,3860,1270,3720,3590,1230,1630,3950,3330,2640,3370,1850,1320,3990,3950,3630,3570,2210,3240,3710,3370,3460,2290,3570,3500,3370,3470,3410,3730,3890,3890,3950,3800,3200,3950,3810,3200,3690,3270,2850
cc-multilingual-main/cc_net/cc_net/data/test_stats.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "2019-09/de_head_0000.json.gz": {
3
+ "size": 5264993,
4
+ "checksum": "fc12ba3dc982ef06e7e44a916f298e1c16f9a806"
5
+ },
6
+ "2019-09/de_middle_0000.json.gz": {
7
+ "size": 9195535,
8
+ "checksum": "2369ff0296ab1d924c81083f17ce41f22a10ad69"
9
+ },
10
+ "2019-09/de_tail_0000.json.gz": {
11
+ "size": 33029074,
12
+ "checksum": "18865040a7263242d298958f358f7cb5511114d4"
13
+ },
14
+ "2019-09/fr_head_0000.json.gz": {
15
+ "size": 4076580,
16
+ "checksum": "4eef4017bbbe042fc01c45b5fbcf94de49f5138e"
17
+ },
18
+ "2019-09/fr_middle_0000.json.gz": {
19
+ "size": 8075095,
20
+ "checksum": "fd251a5b924c4aa66a63c375ca3a8fae23b3273b"
21
+ },
22
+ "2019-09/fr_tail_0000.json.gz": {
23
+ "size": 27248949,
24
+ "checksum": "4a8aed38abc6b9d04459e8d424bd47426f063638"
25
+ },
26
+ "2019-09/it_head_0000.json.gz": {
27
+ "size": 1760696,
28
+ "checksum": "e5e50e49b4a5147ea82b385babd5c83f74d2a4ed"
29
+ },
30
+ "2019-09/it_middle_0000.json.gz": {
31
+ "size": 4461832,
32
+ "checksum": "7daab7b7acb93d81e50534196ada4e94947b8224"
33
+ },
34
+ "2019-09/it_tail_0000.json.gz": {
35
+ "size": 14754298,
36
+ "checksum": "1adc018519a598ff162261d7e480ea41d3458768"
37
+ }
38
+ }
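The stats above pair each packaged test shard with its byte size and a 40-character hex checksum (which looks like a SHA-1 digest; that interpretation is an assumption, not stated in the file). A minimal sketch of how such an entry could be verified locally, with a hypothetical data directory:

import hashlib
import json
from pathlib import Path


def verify_entry(data_dir: Path, name: str, expected: dict) -> bool:
    """Check a downloaded file against the recorded size and (assumed) SHA-1 checksum."""
    path = data_dir / name
    if path.stat().st_size != expected["size"]:
        return False
    sha1 = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha1.update(chunk)
    return sha1.hexdigest() == expected["checksum"]


# Hypothetical usage:
# stats = json.loads(Path("test_stats.json").read_text())
# assert all(verify_entry(Path("test_data"), k, v) for k, v in stats.items())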
cc-multilingual-main/cc_net/cc_net/dedup.py ADDED
@@ -0,0 +1,478 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Tools to remove duplicate paragraphs across one or several shards.
9
+ """
10
+
11
+ import argparse
12
+ import gc
13
+ import hashlib
14
+ import logging
15
+ import multiprocessing
16
+ import os
17
+ import tempfile
18
+ import time
19
+ from pathlib import Path
20
+ from typing import Iterable, List, Optional, Set, Union
21
+
22
+ import numpy as np
23
+
24
+ from cc_net import jsonql
25
+ from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
26
+ from cc_net.jsonql import mem_footprint_gb
27
+ from cc_net.text_normalizer import normalize_for_dedup
28
+
29
+ BYTE_ORDER = "little"
30
+ HASH_SIZE = HASH_TYPE(0).nbytes
31
+ DISABLE_MULTI_PROCESSING = False
32
+
33
+ FilesOrDir = Union[List[Path], Path]
34
+
35
+
36
+ def get_args():
37
+ parser = argparse.ArgumentParser(
38
+ description="Read a set of json files and allow to query them",
39
+ parents=[jsonql.io_parser()],
40
+ )
41
+
42
+ parser.add_argument("--field", type=str, default="raw_content")
43
+ parser.add_argument("--output_hashes", type=str)
44
+ parser.add_argument("--no_finalize", action="store_false", dest="finalize")
45
+ # parser.add_argument("--mem_gb", type=int)
46
+ parser.add_argument("--hashes", type=str)
47
+
48
+ return vars(parser.parse_args())
49
+
50
+
51
+ def _b2i(b: bytes) -> int:
52
+ return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
53
+
54
+
55
+ def str_hash(s: str) -> int:
56
+ h = hashlib.sha1(bytes(s, encoding="utf-8"))
57
+ return _b2i(h.digest())
58
+
59
+
60
+ log = logging.getLogger(__name__).info
61
+
62
+
63
+ def run_par(processes):
64
+ # This is different from multiprocessing.map since it allows for kwargs.
65
+ processes = list(processes)
66
+ if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
67
+ for f, args, kwargs in processes:
68
+ f(*args, **kwargs)
69
+ return
70
+
71
+ log(f"Starting {len(processes)} subprocess")
72
+ processes = [
73
+ multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
74
+ ]
75
+ for p in processes:
76
+ p.start()
77
+ for p in processes:
78
+ p.join()
79
+ failed = 0
80
+ for p in processes:
81
+ if p.exitcode != 0:
82
+ log(f"Process failed with code {p.exitcode}: {p}")
83
+ failed += 1
84
+ assert failed == 0, f"{failed} processes failed..."
85
+
86
+
87
+ def split_file(file, n_splits):
88
+ for i in range(n_splits):
89
+ yield jsonql.SplitFile(file, i, n_splits)
90
+
91
+
92
+ def merge(hashes_1, hashes_2, output):
93
+ if isinstance(hashes_1, str):
94
+ h1 = FlatHashSet()
95
+ h1.load(hashes_1)
96
+ else:
97
+ h1 = hashes_1
98
+
99
+ if isinstance(hashes_2, str):
100
+ h2 = FlatHashSet()
101
+ h2.load(hashes_2)
102
+ else:
103
+ h2 = hashes_2
104
+
105
+ h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
106
+ dup = h1.__contains__(h2_np)
107
+
108
+ # Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
109
+ # h1 with their value.
110
+ h1[h2_np] = dup
111
+ if output:
112
+ h1.dump(output)
113
+ return h1
114
+
115
+
116
+ def merge_shard(hash_files, output):
117
+ h = FlatHashSet()
118
+ h.load(hash_files[0])
119
+ for hash_file in hash_files[1:]:
120
+ h = merge(h, hash_file, output=None)
121
+ print(f"Merged {hash_file}. We now have {len(h)} hashes.")
122
+
123
+ h.dump(output)
124
+ print(f"Saved {len(h)} hashes to {output}.")
125
+
126
+
127
+ def _dump_sentence_hashes(source: Path, output: Path, field: str):
128
+ treated = 0
129
+ started = time.time()
130
+ with open(output, "wb") as o:
131
+ for doc in jsonql.read_jsons(source):
132
+ content = doc.get(field)
133
+ if not content:
134
+ continue
135
+ h = compute_hashes(content)
136
+ if h is None:
137
+ continue
138
+ h.tofile(o)
139
+ treated += 1
140
+ if treated % 100_000 == 0:
141
+ delay = time.time() - started
142
+ log(
143
+ f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
144
+ )
145
+
146
+
147
+ def _remove_duplicate_hashes(duplicates, source, output):
148
+ batch_size = 100_000
149
+ n_lines, n_lines_kept = 0, 0
150
+ with open(source, "rb") as f, open(output, "wb") as o:
151
+ log(f"Opening {source} with mode rb")
152
+ log(f"Opening {output} with mode wb")
153
+ while True:
154
+ hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
155
+ if hashes.size == 0:
156
+ break
157
+
158
+ keep = duplicates[hashes] < 1
159
+ kept = keep.sum()
160
+ hashes *= keep
161
+ hashes.tofile(o)
162
+
163
+ n_lines += hashes.size
164
+ n_lines_kept += kept
165
+
166
+ removed = n_lines - n_lines_kept
167
+ selectivity = n_lines_kept / n_lines if n_lines else 0
168
+ log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
169
+
170
+
171
+ def remove_duplicates_sharded(
172
+ files: List[Path],
173
+ outputs: List[Path],
174
+ hashes_dir: FilesOrDir,
175
+ field: str,
176
+ group_hashes: int = 1,
177
+ tmp_dir: Path = None,
178
+ min_len: int = 0,
179
+ ):
180
+ """Remove duplicates in several passes, when all hashes don't fit in RAM.
181
+
182
+ Note: The current implementation does not do a 'perfect' deduplication.
183
+ If a hash appears exactly once in each shard of hashes, it won't be detected
184
+ as a duplicate. This can be fixed if the hashes are fully deduplicated beforehand.
185
+ """
186
+ assert len(files) == len(outputs)
187
+
188
+ if isinstance(hashes_dir, list):
189
+ hashes_files = hashes_dir
190
+ else:
191
+ hashes_files = sorted(
192
+ h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
193
+ )
194
+
195
+ assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
196
+
197
+ if len(hashes_files) <= group_hashes:
198
+ log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
199
+ rm_dups = DuplicatesRemover(field, hashes_files)
200
+ rm_dups._prepare()
201
+ run_par(
202
+ (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
203
+ for f, o in zip(files, outputs)
204
+ )
205
+ return
206
+
207
+ log(f"Starting deduplicate_sharded on {files}.")
208
+ tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
209
+
210
+ def tmp_files(i):
211
+ return [
212
+ Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
213
+ for f in files
214
+ ]
215
+
216
+ last = tmp_files(0)
217
+ run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
218
+
219
+ if isinstance(hashes_dir, list):
220
+ hashes_files = hashes_dir
221
+ else:
222
+ hashes_files = sorted(
223
+ h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
224
+ )
225
+ for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
226
+ hashes = FlatHashSet()
227
+ for h in group:
228
+ hashes.load(h)
229
+ log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
230
+
231
+ intermediates = tmp_files(i + 1)
232
+ # Remove hashes in parallel. Since modern OSes have "copy-on-write" and
233
+ # `hashes` is read-only, we will only have one version of it in RAM.
234
+ run_par(
235
+ (_remove_duplicate_hashes, (hashes, f, tmp), {})
236
+ for f, tmp in zip(last, intermediates)
237
+ )
238
+ # Force hashes to be freed, before we start allocating a new one.
239
+ del hashes
240
+ gc.collect()
241
+
242
+ for tmp in last:
243
+ os.remove(tmp)
244
+ last = intermediates
245
+
246
+ def finalize(source, dedup_hashes, min_len):
247
+ n_chars, n_chars_kept = 0, 0
248
+ with open(dedup_hashes, "rb") as hashes:
249
+ for doc in jsonql.read_jsons(source):
250
+ content = doc.get(field)
251
+ if not content or len(content) < min_len:
252
+ continue
253
+ sentences = content.split("\n")
254
+ doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
255
+ chars, kept_chars = finalize_doc(doc, field, doc_hashes)
256
+ n_chars += chars
257
+ n_chars_kept += kept_chars
258
+ yield doc
259
+ selectivity = n_chars_kept / n_chars if n_chars else 0
260
+ log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
261
+
262
+ dedup_hashes = last
263
+ run_par(
264
+ [
265
+ (
266
+ jsonql.run_pipe,
267
+ (finalize,),
268
+ dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
269
+ )
270
+ for h, f, o in zip(dedup_hashes, files, outputs)
271
+ ]
272
+ )
273
+
274
+ tmp_directory.cleanup()
275
+
276
+
277
+ def compute_hashes(content) -> Optional[np.ndarray]:
278
+ if not content:
279
+ return None
280
+ lines = content.split("\n")
281
+ # save hashes as bytes but reinterpret them as uint64.
282
+ hashes = np.fromiter(
283
+ (
284
+ hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
285
+ :HASH_SIZE
286
+ ]
287
+ for l in lines
288
+ ),
289
+ dtype=np.dtype((bytes, HASH_SIZE)),
290
+ count=len(lines),
291
+ )
292
+ return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
293
+
294
+
295
+ def finalize_doc(doc, field, hashes=None):
296
+ content = doc.get(field)
297
+ lines = content.split("\n")
298
+ n_chars = len(content)
299
+ if "original_nlines" not in doc:
300
+ doc["original_nlines"] = doc.get("nlines", len(lines))
301
+ if "original_length" not in doc:
302
+ doc["original_length"] = doc.get("length", n_chars)
303
+ if hashes is None:
304
+ hashes = doc.pop(field + "_hash")
305
+
306
+ # Remove duplicates inside doc
307
+ seen: Set[int] = set()
308
+ original_line_ids = doc.get("line_ids", range(len(hashes)))
309
+ line_ids = []
310
+ new_lines = []
311
+ for l, line, h in zip(original_line_ids, lines, hashes):
312
+ if h not in seen and h != 0:
313
+ line_ids.append(l)
314
+ new_lines.append(line)
315
+ seen.add(h)
316
+
317
+ doc[field] = "\n".join(new_lines)
318
+ doc["nlines"] = len(line_ids)
319
+ n_chars_kept = len(doc[field])
320
+ doc["length"] = n_chars_kept
321
+ doc["line_ids"] = line_ids
322
+ return n_chars, n_chars_kept
323
+
324
+
325
+ class HashesCollector(jsonql.Transformer):
326
+ """
327
+ Collect all hashes of lines found in the `field` of the source documents.
328
+ """
329
+
330
+ parallelisable = False
331
+
332
+ def __init__(
333
+ self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
334
+ ):
335
+ super().__init__()
336
+ self.n_lines = 0
337
+ self.field = field
338
+ self.output = output
339
+ self.hashes = FlatHashSet() if hashes is None else hashes
340
+ self.num_hashes_end = 0
341
+ self.num_hashes_start = len(self.hashes)
342
+
343
+ def summary(self) -> List[str]:
344
+ summ = super().summary()
345
+ h = self.num_hashes_end if self.hashes is None else len(self.hashes)
346
+ h = (h - self.num_hashes_start) // 1000
347
+ max_mem = mem_footprint_gb()
348
+ n = self.n_lines // 1000
349
+ summ.append(
350
+ f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
351
+ )
352
+ return summ
353
+
354
+ def do(self, doc: dict) -> None:
355
+ doc_hashes = compute_hashes(doc.get(self.field))
356
+ if doc_hashes is None:
357
+ return
358
+ self.hashes.add(doc_hashes)
359
+ self.n_lines += doc_hashes.size
360
+
361
+ def close(self):
362
+ if self.output and self.hashes:
363
+ self.hashes.dump(self.output)
364
+ self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
365
+ # Save the number of hashes.
366
+ self.num_hashes_end = len(self.hashes)
367
+ # Free up mem even if the transformer is kept somewhere else.
368
+ self.hashes = None # type: ignore
369
+
370
+
371
+ class DuplicatesRemover(jsonql.Transformer):
372
+ """DuplicatesRemover"""
373
+
374
+ # The hashes can't be pickled so they will have to be read back from disk.
375
+ warn_when_pickling = True
376
+
377
+ def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
378
+ """
379
+ Remove duplicates
380
+ """
381
+ super().__init__()
382
+ self.field = field
383
+ self.collect = collect
384
+
385
+ self.hashes_files = hashes_files
386
+ self.duplicates: Optional[AbstractDedupHashSet] = None
387
+
388
+ self.n_lines, self.n_lines_kept = 0, 0
389
+ self.n_chars, self.n_chars_kept = 0, 0
390
+
391
+ def _prepare(self):
392
+ if self.duplicates is not None:
393
+ return
394
+ self.duplicates = FlatHashSet()
395
+
396
+ start = time.time()
397
+ for h in self.hashes_files:
398
+ shard_start = time.time()
399
+ self.duplicates.load(str(h))
400
+ delay = time.time() - shard_start
401
+ self.log(
402
+ f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
403
+ )
404
+
405
+ delay = time.time() - start
406
+ self.log(
407
+ f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
408
+ )
409
+
410
+ def do(self, doc: dict) -> Optional[dict]:
411
+ content = doc.get(self.field)
412
+ if not content:
413
+ return None
414
+ doc_hashes = compute_hashes(content)
415
+
416
+ assert self.duplicates is not None
417
+ seen = (
418
+ self.duplicates.add(doc_hashes)
419
+ if self.collect
420
+ else self.duplicates[doc_hashes]
421
+ )
422
+ keep = seen < True
423
+ kept = keep.sum()
424
+ if kept == 0:
425
+ return None
426
+ doc_hashes = doc_hashes * keep
427
+ self.n_lines += keep.size
428
+ self.n_lines_kept += kept
429
+ chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
430
+ self.n_chars += chars
431
+ self.n_chars_kept += kept_chars
432
+ return doc
433
+
434
+ def summary(self) -> List[str]:
435
+ summ = super().summary()
436
+ end_time = time.time()
437
+ n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
438
+ speed = n_docs / (end_time - self.start_time)
439
+ summ.append(
440
+ f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
441
+ )
442
+ selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
443
+ summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
444
+
445
+ n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
446
+ selectivity = n_chars_kept / n_chars if n_chars else 0
447
+ summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
448
+ return summ
449
+
450
+
451
+ def deduplicate(
452
+ file: jsonql.ReadableFileLike, field: str = "raw_content"
453
+ ) -> Iterable[dict]:
454
+ """Remove duplicates of the given file (but keep the first occurence)."""
455
+ dup_remover = DuplicatesRemover(field, [], collect=True)
456
+ return dup_remover.map(jsonql.read_jsons(file))
457
+
458
+
459
+ def deduplicate_two_pass(
460
+ file: jsonql.FileDescriptor, field: str = "raw_content"
461
+ ) -> Iterable[dict]:
462
+ """Remove duplicates of the given file (even removing the first occurence).
463
+
464
+ This is what is done in the paper, and in mine.py
465
+ """
466
+ try:
467
+ if isinstance(file, Path):
468
+ hash_file: Path = file.with_suffix(".bin")
469
+ else:
470
+ hash_file = jsonql._tmp(Path("hashes.bin"))
471
+ jsonql.run_pipes(
472
+ jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
473
+ )
474
+ dup_remover = DuplicatesRemover(field, [hash_file])
475
+ return dup_remover.map(jsonql.read_jsons(file))
476
+ finally:
477
+ if hash_file.exists():
478
+ hash_file.unlink()
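For reference, a minimal usage sketch of the two deduplication entry points defined above. The file names are hypothetical, and the inputs are assumed to be newline-delimited JSON documents carrying a `raw_content` field (the default field used throughout this module):

import gzip
import json
from pathlib import Path

from cc_net.dedup import deduplicate, deduplicate_two_pass

# One-pass dedup: drops repeated paragraphs but keeps their first occurrence.
with gzip.open("sample.dedup.json.gz", "wt", encoding="utf-8") as o:
    for doc in deduplicate(Path("sample.json.gz"), field="raw_content"):
        o.write(json.dumps(doc, ensure_ascii=False) + "\n")

# Two-pass dedup: collects all paragraph hashes first, then removes every
# duplicated paragraph, including its first occurrence (the behaviour used by mine.py).
docs = list(deduplicate_two_pass(Path("sample.json.gz"), field="raw_content"))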
cc-multilingual-main/cc_net/cc_net/execution.py ADDED
@@ -0,0 +1,248 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ import functools
8
+ import itertools
9
+ import logging
10
+ import os
11
+ import sys
12
+ import time
13
+ import warnings
14
+ from pathlib import Path
15
+ from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
16
+
17
+ import submitit
18
+ from typing_extensions import Protocol
19
+ # import pdb
20
+ from concurrent.futures import ThreadPoolExecutor
21
+
22
+
23
+ class Executor(Protocol):
24
+ def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
25
+ ...
26
+
27
+
28
+ class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
29
+ def __init__(self, fn: Callable):
30
+ self.fn = fn
31
+ self.__name__ = fn.__name__
32
+
33
+ def __call__(self, *args, **kwargs):
34
+ return self.fn(*args, **kwargs)
35
+
36
+
37
+ def get_executor(
38
+ name: str,
39
+ log_dir: Path,
40
+ execution: str,
41
+ timeout_hour: float = 1.0,
42
+ mem_gb: int = 1,
43
+ cpus: int = 1,
44
+ task_parallelism: int = -1,
45
+ options: dict = {},
46
+ ) -> Executor:
47
+
48
+ execution_mode = execution.split(",")[0]
49
+ options.update(
50
+ {kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
51
+ )
52
+
53
+ if execution_mode == "mp":
54
+ warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
55
+ execution_mode = "local"
56
+
57
+ cluster = None if execution_mode == "auto" else execution_mode
58
+ # use submitit to detect which executor is available
59
+ ex = submitit.AutoExecutor(log_dir, cluster=cluster)
60
+ ex.parameters['timeout_min'] = int(timeout_hour * 60)
61
+
62
+ if ex.cluster == "local":
63
+ # LocalExecutor doesn't respect task_parallelism
64
+ return functools.partial(custom_map_array, ex, task_parallelism)
65
+ if ex.cluster == "debug":
66
+ return debug_executor
67
+ # pdb.set_trace()
68
+ # We are on slurm
69
+ if task_parallelism == -1:
70
+ task_parallelism = 500
71
+
72
+ ex.update_parameters(
73
+ name=name,
74
+ timeout_min=int(timeout_hour * 60),
75
+ mem_gb=mem_gb,
76
+ cpus_per_task=cpus,
77
+ slurm_array_parallelism=task_parallelism,
78
+ **options,
79
+ )
80
+ return functools.partial(map_array_and_wait, ex)
81
+
82
+
83
+ def map_array_and_wait(
84
+ ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
85
+ ):
86
+ f_name = function.__name__
87
+
88
+ assert len(args) > 0, f"No arguments passed to {f_name}"
89
+ approx_length = _approx_length(*args)
90
+
91
+ print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
92
+ jobs = ex.map_array(function, *args)
93
+ if not jobs:
94
+ return
95
+ failed_jobs = []
96
+ done = 0
97
+ total = len(jobs)
98
+ job_array_id = jobs[0].job_id.split("_")[0]
99
+ # pdb.set_trace()
100
+ print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
101
+ for job in submitit.helpers.as_completed(jobs):
102
+ done += 1
103
+ e = job.exception()
104
+ if not e:
105
+ print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
106
+ continue
107
+
108
+ print(f"Failed job {job.job_id} ({done} / {total}):", e)
109
+ failed_jobs.append(job)
110
+
111
+ if failed_jobs:
112
+ n_failures = 10
113
+ message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
114
+ print(message)
115
+ for job in failed_jobs[:n_failures]:
116
+ print(f"Failed {job.job_id} -> {job.paths.stderr}")
117
+ if len(failed_jobs) > n_failures:
118
+ print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
119
+ raise Exception(message)
120
+
121
+
122
+ def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
123
+ logging.getLogger().setLevel(logging.DEBUG)
124
+ approx_length = _approx_length(*args)
125
+ for i, x in enumerate(zip(*args)):
126
+ try:
127
+ message = function(*x)
128
+ except Exception:
129
+ exit(1)
130
+ try:
131
+ import ipdb as pdb # type: ignore
132
+ except ImportError:
133
+ import pdb # type: ignore
134
+ import traceback
135
+
136
+ traceback.print_exc()
137
+ print("")
138
+ pdb.post_mortem()
139
+ sys.exit(1)
140
+ if message is not None:
141
+ print(message, f"({i + 1} / {approx_length})")
142
+
143
+ # def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
144
+ # logging.getLogger().setLevel(logging.DEBUG)
145
+ # approx_length = _approx_length(*args)
146
+ # with ThreadPoolExecutor(max_workers=4) as executor:
147
+ # futures = []
148
+ # for i, x in enumerate(zip(*args)):
149
+ # future = executor.submit(_execute_function, function, x, i + 1, approx_length)
150
+ # futures.append(future)
151
+ # for future in futures:
152
+ # future.result()
153
+
154
+ # def _execute_function(function: Callable[..., Optional[str]], args: tuple, index: int, total: int):
155
+ # try:
156
+ # message = function(*args)
157
+ # if message is not None:
158
+ # print(message, f"({index} / {total})")
159
+ # except Exception:
160
+ # # traceback.print_exc()
161
+ # sys.exit(1)
162
+
163
+ def _approx_length(*args: Iterable):
164
+ for a in args:
165
+ if isinstance(a, Sized):
166
+ return len(a)
167
+ return -1
168
+
169
+
170
+ def custom_map_array(
171
+ ex: submitit.AutoExecutor,
172
+ parallelism: int,
173
+ function: Callable[..., Optional[str]],
174
+ *args: Iterable,
175
+ ) -> None:
176
+ f_name = function.__name__
177
+ assert len(args) > 0, f"No arguments passed to {f_name}"
178
+
179
+ jobs_args = list(zip(*args))
180
+ total = len(jobs_args)
181
+ if parallelism < 0:
182
+ parallelism = os.cpu_count() or 0
183
+ assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
184
+ print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
185
+ enqueued = 0
186
+ done = 0
187
+ running_jobs: List[submitit.Job] = []
188
+ failed_jobs: List[submitit.Job] = []
189
+
190
+ while done < len(jobs_args):
191
+ # Try to queue more jobs if we have some bandwidth.
192
+ if enqueued < total and len(running_jobs) < parallelism:
193
+ running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
194
+ enqueued += 1
195
+ continue
196
+
197
+ # Else wait for some job to finish
198
+ if not running_jobs:
199
+ warnings.warn(
200
+ f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
201
+ )
202
+ break
203
+
204
+ job = get_next_job(running_jobs)
205
+ running_jobs.remove(job)
206
+ done += 1
207
+ e = job.exception()
208
+ if not e:
209
+ print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
210
+ continue
211
+
212
+ print(f"Failed job {job.job_id} ({done} / {total}):", e)
213
+ failed_jobs.append(job)
214
+
215
+ if failed_jobs:
216
+ n_failures = 10
217
+ message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
218
+ print(message)
219
+ for job in failed_jobs[:n_failures]:
220
+ print(f"Failed {job.job_id} -> {job.paths.stderr}")
221
+ if len(failed_jobs) > n_failures:
222
+ print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
223
+ raise Exception(message)
224
+
225
+
226
+ def get_next_job(
227
+ jobs: Sequence[submitit.Job], poll_frequency: float = 10
228
+ ) -> submitit.Job:
229
+ """
230
+ Waits for any of the jobs to finish and returns it.
231
+
232
+ jobs: list of jobs
233
+ poll_frequency: frequency in seconds at which we check job status
234
+ """
235
+ start = time.time()
236
+ waiting = False
237
+ while True:
238
+ for job in jobs:
239
+ if job.done():
240
+ return job
241
+ if not waiting:
242
+ job_ids = [j.job_id for j in jobs[:4]]
243
+ suffix = "..." if len(jobs) > 4 else ""
244
+ print(
245
+ f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
246
+ )
247
+ waiting = True
248
+ time.sleep(poll_frequency)
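A small sketch of how the executor factory above might be wired up, assuming local execution and a toy `process_shard` function; every name, path, and value below is illustrative rather than part of the pipeline:

from pathlib import Path

from cc_net.execution import get_executor


def process_shard(shard: str, output: str) -> str:
    # Placeholder work: a real job would read the shard and write its results.
    Path(output).write_text(f"processed {shard}\n")
    return f"done: {shard}"


executor = get_executor(
    name="demo",
    log_dir=Path("exp/logs"),
    execution="local",  # other modes include "debug", "auto", or "slurm,<key>=<value>"
    timeout_hour=0.5,
    mem_gb=1,
    cpus=1,
    task_parallelism=2,
)
# Each positional iterable supplies one argument per job, map()-style.
executor(process_shard, ["shard0", "shard1"], ["out0.txt", "out1.txt"])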
cc-multilingual-main/cc_net/cc_net/flat_hash_set.py ADDED
@@ -0,0 +1,247 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ import sys
8
+ import time
9
+ import warnings
10
+ from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
11
+
12
+ import numpy as np
13
+
14
+ HASH_TYPE: Type[np.uint64] = np.uint64
15
+
16
+ GETPY_WARNING = False
17
+
18
+
19
+ class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
20
+ """A dict-like that returns `True` for keys that have been added more than once.
21
+
22
+ The API is batched and expects np.array as input. This batching gives better
23
+ performance when using the C++ implementation.
24
+ """
25
+
26
+ dtype: Type[np.uint64] = HASH_TYPE
27
+
28
+ def __repr__(self):
29
+ implementation = type(self).__name__
30
+ return f"[{implementation}, len: {len(self)}"
31
+
32
+ def __len__(self) -> int:
33
+ ...
34
+
35
+ def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
36
+ ...
37
+
38
+ def __getitem__(self, values) -> np.ndarray:
39
+ ...
40
+
41
+ def __setitem__(self, keys, values) -> None:
42
+ ...
43
+
44
+ def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
45
+ ...
46
+
47
+ def keys(self) -> Iterable[np.uint64]:
48
+ ...
49
+
50
+ def __iter__(self) -> Iterator[np.uint64]:
51
+ return iter(self.keys())
52
+
53
+ def add(self, h, contains=None):
54
+ """Add the given keys. First time a key is added the value is set to 0,
55
+ then it's set to one."""
56
+ if not isinstance(h, np.ndarray):
57
+ h = np.array(h, dtype=HASH_TYPE)
58
+ if contains is None:
59
+ contains = self.__contains__(h)
60
+
61
+ self.__setitem__(h, contains)
62
+ return contains
63
+
64
+ def merge(self, keys, values):
65
+ contains = self.__contains__(keys)
66
+ self.__setitem__(keys, contains | values)
67
+
68
+ def dump(self, filename):
69
+ return self.dump_np(filename)
70
+
71
+ def load(self, filename):
72
+ return self.load_np(filename)
73
+
74
+ def dump_np(self, filename):
75
+ kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
76
+ items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
77
+ with open(filename, "wb") as f:
78
+ np.save(f, items)
79
+
80
+ def load_np(self, filename):
81
+ items = np.load(str(filename))
82
+ keys = items["k"].copy()
83
+ values = items["v"].copy()
84
+ self.merge(keys, values)
85
+
86
+ def dump_np2(self, filename):
87
+ keys = np.fromiter(
88
+ (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
89
+ )
90
+ with open(filename, "wb") as f:
91
+ np.save(f, keys)
92
+
93
+ values = np.fromiter(
94
+ (v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
95
+ )
96
+ with open(str(filename) + ".val", "wb") as f:
97
+ np.save(f, values)
98
+
99
+ def load_np2(self, filename):
100
+ keys = np.load(filename)
101
+ values = np.load(str(filename) + ".val")
102
+ self.merge(keys, values)
103
+
104
+
105
+ class NaiveHashSet(dict, AbstractDedupHashSet):
106
+ """Pure python implementation of AbstractDedupHashSet.
107
+
108
+ This implementation is quite fast, since Python dict are heavily optimized.
109
+ """
110
+
111
+ def __init__(self, iterable=None):
112
+ super().__init__()
113
+ global GETPY_WARNING
114
+ if GETPY_WARNING:
115
+ warnings.warn(
116
+ "Module 'getpy' not found. Deduplication will take more RAM."
117
+ " Try `pip install cc_net[getpy]"
118
+ )
119
+ GETPY_WARNING = False
120
+
121
+ def __contains__(self, values):
122
+ """Returns `True` if the object has been added at list once."""
123
+ contains_point = super().__contains__
124
+ return np.fromiter(
125
+ map(contains_point, values), count=len(values), dtype=np.uint8
126
+ )
127
+
128
+ def __getitem__(self, values):
129
+ """Returns `True` if the object has been added at list twice."""
130
+ get_point = super().get
131
+ return np.fromiter(
132
+ map(lambda x: get_point(x, False), values),
133
+ count=len(values),
134
+ dtype=np.uint8,
135
+ )
136
+
137
+ def __setitem__(self, keys, values):
138
+ assert len(keys) == len(values)
139
+ for k, v in zip(keys, values):
140
+ dict.__setitem__(self, k, v)
141
+
142
+
143
+ try:
144
+ import getpy as gp # type: ignore
145
+
146
+ class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
147
+ """C++ backed implementation of AbstractDedupHashSet.
148
+
149
+ This implementation is slightly slower than the Python one but uses
150
+ 3x less RAM.
151
+ See https://github.com/atom-moyer/getpy.
152
+ """
153
+
154
+ def __init__(self):
155
+ super().__init__(HASH_TYPE, np.uint8, default_value=False)
156
+
157
+ def __contains__(self, h):
158
+ """Returns `True` if the object has been added at list once."""
159
+ if not isinstance(h, np.ndarray):
160
+ h = np.array(h, dtype=HASH_TYPE)
161
+ c = gp.Dict.__contains__(self, h)
162
+ c.dtype = np.uint8
163
+ return c
164
+
165
+ def dump(self, filename):
166
+ return self.dump_gp(filename)
167
+
168
+ def load(self, filename):
169
+ return self.load_gp(filename)
170
+
171
+ def dump_gp(self, filename):
172
+ return gp.Dict.dump(self, str(filename))
173
+
174
+ def load_gp(self, filename):
175
+ """Override gp.Dict.load, to correctly merge values instead of overwriting."""
176
+ other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
177
+ other.load(str(filename))
178
+ n = len(other)
179
+ keys = np.fromiter(
180
+ (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
181
+ )
182
+ values = np.fromiter(
183
+ (v for (k, v) in other.items()), dtype=np.uint8, count=n
184
+ )
185
+ self.merge(keys, values)
186
+
187
+ FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
188
+ except ImportError:
189
+ GETPY_WARNING = True
190
+ FlatHashSet = NaiveHashSet
191
+
192
+
193
+ def timeit(message, function, *args):
194
+ start = time.time()
195
+ function(*args)
196
+ end = time.time()
197
+ print(message, f"took {end - start:.0f}s")
198
+
199
+
200
+ def compare_load(*filenames):
201
+ assert filenames, "No file given"
202
+
203
+ def load_list():
204
+ hashes = []
205
+ for f in filenames:
206
+ h = FlatHashSet()
207
+ h.load(f)
208
+ print(f"Loaded {h} from {f}.")
209
+ hashes.append(h)
210
+ return hashes
211
+
212
+ def load_all(load, ext):
213
+ hashes = FlatHashSet()
214
+ for f in filenames:
215
+ load(hashes, f + ext)
216
+
217
+ def dump_all(hashes, dump, ext):
218
+ for h, f in zip(hashes, filenames):
219
+ dump(h, f + ext)
220
+
221
+ hashes = load_list()
222
+ dump_gp = getattr(FlatHashSet, "dump_gp")
223
+ if dump_gp is not None:
224
+ timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
225
+ timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
226
+ timeit(
227
+ "Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
228
+ )
229
+
230
+ load_gp = getattr(FlatHashSet, "load_gp")
231
+ if load_gp is not None:
232
+ timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
233
+ timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
234
+ timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
235
+
236
+ # Loading 10 shards:
237
+ # [dedup] Dumping using gp.dump took 52s
238
+ # [dedup] Dumping using dump_np took 270s
239
+ # [dedup] Dumping using dump_np2 took 483s
240
+ #
241
+ # [dedup] Loading using gp.load took 654s
242
+ # [dedup] Loading using load_np took 82s
243
+ # [dedup] Loading using load_np2 took 76s
244
+
245
+
246
+ if __name__ == "__main__":
247
+ compare_load(*sys.argv[1:])
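To make the duplicate-flag semantics above concrete, here is a small sketch; the key values are arbitrary, and `FlatHashSet` silently falls back to `NaiveHashSet` when getpy is not installed:

import numpy as np

from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet

hashes = FlatHashSet()

batch1 = np.array([1, 2, 3], dtype=HASH_TYPE)
batch2 = np.array([2, 3, 4], dtype=HASH_TYPE)

seen1 = hashes.add(batch1)  # nothing seen before: flags are all 0
seen2 = hashes.add(batch2)  # 2 and 3 were already added, so they are flagged

# __getitem__ reports which keys were added more than once.
flags = hashes[np.array([1, 2, 3, 4], dtype=HASH_TYPE)]
print(seen1.tolist(), seen2.tolist(), flags.tolist())
# Expected output (as uint8 flags): [0, 0, 0] [1, 1, 0] [0, 1, 1, 0]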
cc-multilingual-main/cc_net/cc_net/get_wiki_cirrus.py ADDED
@@ -0,0 +1,127 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Creates mono-lingual corpus from Wikipedia.
9
+ """
10
+
11
+ import functools
12
+ import re
13
+ import subprocess
14
+ import urllib.request
15
+ from pathlib import Path
16
+ from typing import Dict
17
+
18
+ import func_argparse
19
+ from bs4 import BeautifulSoup # type: ignore
20
+
21
+ from cc_net import jsonql, text_normalizer
22
+
23
+ CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
24
+ CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
25
+
26
+
27
+ def tmp(file: Path) -> Path:
28
+ return file.parent / ("tmp." + file.name)
29
+
30
+
31
+ def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
32
+ """Will dump the tokenized opening text of the given Wikipedia.
33
+
34
+ Args:
35
+ - file: File containing the Wikipedia dump.
36
+ - output: Output file.
37
+ - n_docs: How many docs to parse
38
+ - tokenize: whether to tokenize the text
39
+ - lang: Language code used to choose the tokenizer
40
+ """
41
+ assert file.exists()
42
+ jsonql.run_pipes(
43
+ functools.partial(extract_opening_text, n_docs=n_docs),
44
+ file=file,
45
+ output=tmp(output) if output else None,
46
+ )
47
+ if output:
48
+ tmp(output).replace(output)
49
+
50
+
51
+ def extract_opening_text(source, n_docs: int = 10_000):
52
+ i = 0
53
+ for doc in jsonql.read_jsons(source):
54
+ if not doc:
55
+ continue
56
+
57
+ text = doc.get("opening_text")
58
+ if not text:
59
+ continue
60
+
61
+ yield text_normalizer.normalize(text)
62
+ i += 1
63
+ if i >= n_docs:
64
+ break
65
+
66
+
67
+ def dl(lang: str, output_dir: Path, date: str = None):
68
+ """Download the cirrus extract for the given lang.
69
+
70
+ See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
71
+
72
+ Args:
73
+ - lang: The Wikipedia code for the language.
74
+ - output_dir: Output directory. File will be `{lang}.json.gz`
75
+ - date: Date of a specific Cirrus dump.
76
+ """
77
+
78
+ urls = get_cirrus_urls(date)
79
+ assert (
80
+ lang in urls
81
+ ), f"--lang {lang} not found. Available languages are: {urls.keys()}"
82
+
83
+ assert output_dir, "--output_dir folder needed."
84
+ output_dir.mkdir(exist_ok=True)
85
+ output = output_dir / (lang + ".json.gz")
86
+ print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
87
+ wget(urls[lang], output)
88
+
89
+
90
+ def get_cirrus_urls(date: str = None) -> Dict[str, str]:
91
+ if date is None:
92
+ cirrus_page = BeautifulSoup(
93
+ urllib.request.urlopen(CIRRUS_URL), features="html.parser"
94
+ )
95
+ dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
96
+ dumps.remove("..")
97
+ dumps.remove("current")
98
+ # We take the oldest dump since the most recent might be incomplete.
99
+ # The page only link to the N latest dumps so the dump won't be too old.
100
+ date = min(dumps)
101
+
102
+ cirrus_url = "/".join((CIRRUS_URL, date))
103
+ print("Will use the Wikipedia dump from:", date, cirrus_url)
104
+ cirrus_page = BeautifulSoup(
105
+ urllib.request.urlopen(cirrus_url), features="html.parser"
106
+ )
107
+ urls = {}
108
+ for link in cirrus_page.findAll("a"):
109
+ match = CIRRUS_DUMP_RE.match(link.get("href"))
110
+ if not match:
111
+ continue
112
+
113
+ urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
114
+ assert urls, f"No valid download urls found at {cirrus_url}"
115
+ return urls
116
+
117
+
118
+ def wget(url: str, output: Path):
119
+ subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
120
+ tmp(output).replace(output)
121
+ assert (
122
+ output.stat().st_size > 10_000
123
+ ), f"File {output} downloaded from {url} looks too small"
124
+
125
+
126
+ if __name__ == "__main__":
127
+ func_argparse.main(dl, opening)
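Because the module registers `dl` and `opening` with `func_argparse`, it can be run as a script; the same functions can also be called directly, as in this sketch (language code, directory, and file names are illustrative, and the download is large):

from pathlib import Path

from cc_net.get_wiki_cirrus import dl, opening

out_dir = Path("data/wiki")
out_dir.mkdir(parents=True, exist_ok=True)

# Downloads {lang}.json.gz, picking the oldest dump listed on the Cirrus page.
dl(lang="it", output_dir=out_dir)

# Extracts and normalizes the opening text of up to n_docs articles.
opening(
    file=out_dir / "it.json.gz",
    output=out_dir / "it.opening.txt",
    n_docs=100_000,
)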
cc-multilingual-main/cc_net/cc_net/jsonql.py ADDED
@@ -0,0 +1,1340 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Manipulate files containing one json per line.
9
+ """
10
+ import argparse
11
+ import collections
12
+ import contextlib
13
+ import functools
14
+ import glob
15
+ import gzip
16
+ import importlib
17
+ import inspect
18
+ import io
19
+ import itertools
20
+ import json
21
+ import logging
22
+ import multiprocessing
23
+ import os
24
+ import re
25
+ import sys
26
+ import tempfile
27
+ import time
28
+ import typing as tp
29
+ import warnings
30
+ import zlib
31
+ from pathlib import Path
32
+ from typing import (
33
+ Callable,
34
+ Dict,
35
+ Iterable,
36
+ Iterator,
37
+ List,
38
+ Optional,
39
+ Sequence,
40
+ TextIO,
41
+ Tuple,
42
+ Union,
43
+ )
44
+
45
+ import numpy as np
46
+ import psutil # type: ignore
47
+ import requests
48
+ from typing_extensions import Protocol
49
+
50
+ logging.basicConfig(
51
+ level=logging.INFO,
52
+ format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
53
+ datefmt="%Y-%m-%d %H:%M",
54
+ )
55
+
56
+ NEWLINE = " N3WL1N3 "
57
+
58
+ FilterFn = Callable[[dict], bool]
59
+ FileDescriptor = Union[Path, List[Path], str]
60
+ WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
61
+ ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
62
+
63
+
64
+ def io_parser():
65
+ """Parser shared by all commands to get input/output files."""
66
+ parser = argparse.ArgumentParser(add_help=False)
67
+ file_help = """File to read from. Can be specified several times for several files.
68
+ Be careful that bash will expand glob patterns **before** sending the args
69
+ to python. To use globs, put them inside single quotes:
70
+ jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
71
+ jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
72
+ [Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
73
+ [Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
74
+ """
75
+ parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
76
+ parser.add_argument("-o", "--output", type=Path, default="-")
77
+ parser.add_argument("--processes", type=int, default=1)
78
+ return parser
79
+
80
+
81
+ def get_parser():
82
+ parser = argparse.ArgumentParser(
83
+ description="Read a set of json files and allow to query them"
84
+ )
85
+ subparsers = parser.add_subparsers()
86
+
87
+ def add_subparser(function, arguments):
88
+ doc = function.__doc__.split("\n")[0]
89
+ p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
90
+ p.set_defaults(command=function)
91
+ for k, v in arguments.items():
92
+ p.add_argument(k, **v)
93
+
94
+ add_subparser(
95
+ select,
96
+ {
97
+ "columns": dict(nargs="+", help="Extract the value of the given fields"),
98
+ "--skip_empty": dict(
99
+ action="store_true", help="Skip lines without the requested fields"
100
+ ),
101
+ "--separator": dict(
102
+ default="\t", help="Separator to use between the different columns"
103
+ ),
104
+ "--newline": dict(
105
+ default=NEWLINE,
106
+ help="Replace newlines found in the text by the given string",
107
+ ),
108
+ },
109
+ )
110
+
111
+ add_subparser(
112
+ where,
113
+ {
114
+ "clauses": dict(nargs="+", help=""),
115
+ "--requires": dict(
116
+ action="append", help="Python module required by the clauses code."
117
+ ),
118
+ },
119
+ )
120
+
121
+ add_subparser(
122
+ merge,
123
+ {
124
+ "columns": dict(nargs="+", help=""),
125
+ "--separator": dict(
126
+ default="\t", help="Separator to use between the different columns"
127
+ ),
128
+ "--newline": dict(
129
+ default=NEWLINE, help="Replace the given string by actual newlines"
130
+ ),
131
+ },
132
+ )
133
+
134
+ add_subparser(
135
+ describe,
136
+ {
137
+ "columns": dict(nargs="*", help=""),
138
+ "--bins": dict(
139
+ default="auto", help="Number of bins for computing the histograms"
140
+ ),
141
+ "--cumulative": dict(
142
+ action="store_true", help="Compute cumulative histograms"
143
+ ),
144
+ "--weights": dict(type=str, help="Column used to weight histograms"),
145
+ },
146
+ )
147
+
148
+ add_subparser(split, {"--pattern": dict(type=str)})
149
+ add_subparser(shard, {})
150
+ return parser
151
+
152
+
153
+ def _split_array(array, sep):
154
+ last = 0
155
+ for i, x in enumerate(array):
156
+ if x != sep:
157
+ continue
158
+ yield array[last:i]
159
+ last = i + 1
160
+ if last != len(array):
161
+ yield array[last:]
162
+
163
+
164
+ def main(raw_args):
165
+ parser = get_parser()
166
+ pipeline = []
167
+ file = "-"
168
+ output = "-"
169
+ processes = 1
170
+
171
+ for args_group in _split_array(raw_args, "--"):
172
+ args = vars(parser.parse_args(args_group))
173
+ command = args.pop("command")
174
+ file = args.pop("file") or file
175
+ output = args.pop("output") or output
176
+ processes = args.pop("processes") or processes
177
+ pipeline.append(as_pipe(command, args))
178
+
179
+ if not pipeline:
180
+ parser.print_help()
181
+ return
182
+
183
+ run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
184
+
185
+
186
+ class Transformer:
187
+ """
188
+ Wrapper around functions transforming documents.
189
+
190
+ This allows `run_pipes` to automatically parallelize the pipeline.
191
+ Provides:
192
+ * Automatic logging. Logging can be changed with the `summary` method.
193
+ Logging frequency is set with _log_freq (in seconds) or the $JSONQL_LOG_FREQ env variable.
194
+ * Automatic parallelization without pickling. The transformers are shared
195
+ across processes, and the object is usually not pickled.
196
+ * Basic pickling / unpickling in case it's still needed.
197
+ By default will only pickle the arguments passed to the constructor.
198
+ * Delayed initialization. Internal state which is not picklable should be set
199
+ inside the `_prepare` function.
200
+ """
201
+
202
+ parallelisable: bool = True
203
+ expect_json: bool = False
204
+ warn_when_pickling: bool = False
205
+ ready: bool = False
206
+
207
+ def __init_subclass__(cls, expect_json: bool = None):
208
+ """Detects if the subclass expects json as input."""
209
+ spec = inspect.getfullargspec(cls.do)
210
+ if expect_json is None:
211
+ expect_json = spec.annotations.get(spec.args[1], None) == dict
212
+
213
+ cls.expect_json = expect_json
214
+
215
+ def __new__(cls, *args, **kwargs):
216
+ """Creates the transformer and save the arguments passed to the constructor."""
217
+ t = super().__new__(cls)
218
+ Transformer.__init__(t, args, kwargs)
219
+ return t
220
+
221
+ def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
222
+ """
223
+ Init the transformer counters.
224
+
225
+ If state_args/state_kwargs are set they will override whatever was
226
+ originally passed to the subclass constructor.
227
+ """
228
+ if state_args is not None:
229
+ self.__args = state_args
230
+ if state_kwargs is not None:
231
+ self.__kwargs = state_kwargs
232
+
233
+ self.start_time = time.time()
234
+ self.__last_log = self.start_time
235
+ self.processed = 0
236
+ # Log every 5 min unless specified otherwise.
237
+ self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
238
+ self.__cls = type(self)
239
+ self._logger = logging.getLogger(self.__cls.__name__)
240
+
241
+ def __call__(self, x):
242
+ assert self.ready, f"{self} is not ready."
243
+ if x is None:
244
+ return
245
+ y = self.do(x)
246
+ self.processed += 1
247
+ if time.time() - self.__last_log > self._log_freq:
248
+ self.log_summary()
249
+ return y
250
+
251
+ def do(self, x):
252
+ raise NotImplementedError(f"'do' not implemented in {type(self)}")
253
+
254
+ def summary(self) -> List[str]:
255
+ return [self.speed_summary()]
256
+
257
+ def speed_summary(self) -> str:
258
+ delay = time.time() - self.start_time
259
+ h = delay / 3600
260
+ s = self.processed / delay
261
+ return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
262
+
263
+ def log(self, message):
264
+ self._logger.info(message)
265
+
266
+ def log_summary(self) -> None:
267
+ if not self.ready:
268
+ self.log("Not ready.")
269
+ return
270
+ summ = self.summary() or []
271
+ for line in summ:
272
+ self.log(line)
273
+ self.__last_log = time.time()
274
+
275
+ def map(self, source: Iterable) -> Iterator:
276
+ if self.ready:
277
+ for x in source:
278
+ yield self(x)
279
+ # since we have been prepared by the caller,
280
+ # the caller is also responsible for calling `close`.
281
+ return
282
+ else:
283
+ with self:
284
+ for x in source:
285
+ yield self(x)
286
+
287
+ def __getstate__(self) -> Tuple[tuple, dict, bool]:
288
+ return (self.__args, self.__kwargs, self.expect_json)
289
+
290
+ def __setstate__(self, state: Tuple[tuple, dict, bool]):
291
+ if self.warn_when_pickling:
292
+ warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
293
+ (args, kwargs, expect_json) = state
294
+ # When unpickling `__new__` isn't called so we have to do it ourselves.
295
+ Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
296
+ type(self).__init__(self, *args, **kwargs)
297
+ assert self.expect_json == expect_json
298
+ # __setstate__ is called by multiprocessing right before calling
299
+ # the object so we need to initialize everything.
300
+ self.__enter__()
301
+
302
+ def _prepare(self) -> None:
303
+ pass
304
+
305
+ def __enter__(self) -> "Transformer":
306
+ # In multiprocessing __enter__ is always called twice, so we are idempotent.
307
+ # Because we call __enter__ when deserializing this transformer and
308
+ # also when the parent transformer is deserialized.
309
+ self.start_time = time.time()
310
+ if self.ready:
311
+ return self
312
+ self._prepare()
313
+ self.ready = True
314
+ return self
315
+
316
+ def __exit__(self, *args) -> None:
317
+ self.close()
318
+ self.log_summary()
319
+
320
+ def close(self) -> None:
321
+ pass
322
+
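+ # Minimal usage sketch (illustrative only, not part of the original file): a
+ # subclass only needs to implement `do`; non-picklable state belongs in
+ # `_prepare`. The field name "raw_content" below is just an example.
+ #
+ #   class FieldLength(Transformer):
+ #       def do(self, doc: dict) -> dict:
+ #           doc["length"] = len(doc.get("raw_content", ""))
+ #           return doc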
323
+
324
+ def as_pipe(transformer, kwargs):
325
+ if isinstance(transformer, type):
326
+ return transformer(**kwargs)
327
+ return lambda source: transformer(source, **kwargs)
328
+
329
+
330
+ def compose(fns: List[Transformer]) -> Transformer:
331
+ if len(fns) == 1:
332
+ return fns[0]
333
+ return MultiTransformer(fns)
334
+
335
+
336
+ class MultiTransformer(Transformer):
337
+ def __init__(self, transformers: List[Transformer]):
338
+ super().__init__()
339
+ self.transformers = transformers
340
+
341
+ def __repr__(self) -> str:
342
+ pipeline = " | ".join(type(t).__name__ for t in self.transformers)
343
+ return f"<{pipeline}>"
344
+
345
+ def do(self, x):
346
+ for t in self.transformers:
347
+ x = t(x)
348
+ return x
349
+
350
+ def _prepare(self):
351
+ for t in self.transformers:
352
+ t.__enter__()
353
+ return self
354
+
355
+ def __exit__(self, *args):
356
+ for t in self.transformers:
357
+ t.__exit__(*args)
358
+
359
+ def summary(self):
360
+ return itertools.chain(*(t.summary() for t in self.transformers))
361
+
362
+
363
+ class Mapper(Transformer):
364
+ def __init__(self, fn):
365
+ super().__init__()
366
+ self.fn = fn
367
+
368
+ def do(self, x):
369
+ return self.fn(x)
370
+
371
+
372
+ def run_pipe(
373
+ command,
374
+ kwargs: dict = None,
375
+ file: ReadableFileLike = None,
376
+ output: WritableFileLike = None,
377
+ ):
378
+ kwargs = kwargs or {}
379
+ if isinstance(kwargs, argparse.ArgumentParser):
380
+ kwargs = vars(kwargs.parse_args())
381
+ file = file or Path(kwargs.pop("file", "-"))
382
+ output = output or Path(kwargs.pop("output", "-"))
383
+
384
+ return run_pipes(as_pipe(command, kwargs), file=file, output=output)
385
+
386
+
387
+ def run_pipes(
388
+ *fns: Union[Transformer, Callable[[Iterable], Iterable]],
389
+ inputs: Iterable[dict] = None,
390
+ file: ReadableFileLike = None,
391
+ output: WritableFileLike = None,
392
+ processes: int = 1,
393
+ chunksize: int = 10_000,
394
+ ):
395
+ """
396
+ Run full document processing pipeline.
397
+
398
+ - fns: list of functions to run over the documents. Can be:
399
+ * `Iterable -> Iterable` function
400
+ * jsonql.Transformer instance
401
+ Using transformers allows the pipeline to process documents in parallel.
402
+ - inputs: iterable to read the documents from
403
+ - file: if inputs is not given, will read documents from this file.
404
+ - output: writable file like.
405
+ - processes: number of processes to use. -1 means use all available CPUs.
406
+ - chunksize: chunksize for multiprocessing.Pool.imap_unordered
407
+ """
408
+ expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
409
+ if expect_json and inputs is None:
410
+ fns = (JsonReader(),) + fns
411
+ transformers = []
412
+ for t in fns:
413
+ if not isinstance(t, Transformer):
414
+ break
415
+ if not t.parallelisable:
416
+ break
417
+ transformers.append(t)
418
+ pipes = fns[len(transformers) :]
419
+
420
+ log = logging.getLogger(__name__).info
421
+ if inputs is None:
422
+ data: Iterable = open_read(file)
423
+ else:
424
+ data = inputs
425
+
426
+ if processes == -1:
427
+ processes = os.cpu_count() or 0
428
+
429
+ with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
430
+ if transformers:
431
+ log(f"preparing {transformers}")
432
+ transform = stack.enter_context(compose(transformers))
433
+ if processes <= 1:
434
+ data = transform.map(data)
435
+ else:
436
+ p = multiprocessing.current_process()
437
+ log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
438
+ pool = stack.enter_context(
439
+ multiprocessing.Pool(
440
+ processes=processes,
441
+ initializer=_set_global_transformer,
442
+ initargs=(transform,),
443
+ )
444
+ )
445
+ data = pool.imap_unordered(
446
+ _global_transformer, data, chunksize=chunksize
447
+ )
448
+
449
+ for fn in pipes:
450
+ if isinstance(fn, Transformer):
451
+ data = fn.map(data)
452
+ else:
453
+ data = fn(data)
454
+
455
+ write_jsons(data, output)
456
+
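+ # Usage sketch (illustrative, not part of the original file): running a small
+ # in-memory pipeline with a plain function. The output file name is made up.
+ #
+ #   docs = [{"text": "a"}, {"text": "bb"}]
+ #   add_len = Mapper(lambda d: dict(d, length=len(d["text"])))
+ #   run_pipes(add_len, inputs=docs, output=Path("lengths.json"))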
457
+
458
+ # Allows sharing the transformer across subprocesses.
459
+ # Used by `run_pipes`
460
+ _GLOBAL_TRANSFORMER: Optional[Transformer] = None
461
+
462
+
463
+ def _set_global_transformer(transformer: Transformer):
464
+ global _GLOBAL_TRANSFORMER
465
+ p = multiprocessing.current_process()
466
+ logging.info(
467
+ f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
468
+ )
469
+ assert transformer.ready, f"{transformer} isn't ready"
470
+ _GLOBAL_TRANSFORMER = transformer
471
+
472
+
473
+ def _global_transformer(document: str) -> Optional[dict]:
474
+ assert _GLOBAL_TRANSFORMER is not None
475
+ return _GLOBAL_TRANSFORMER(document)
476
+
477
+
478
+ def lines(file: ReadableFileLike) -> Iterator[str]:
479
+ return (line.strip("\n") for line in open_read(file))
480
+
481
+
482
+ def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
483
+ reader = JsonReader(strict=strict)
484
+ lines = open_read(file)
485
+ for line in lines:
486
+ if line is None:
487
+ continue
488
+ yield reader(line)
489
+
490
+ reader.log_summary()
491
+
492
+
493
+ def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
494
+ eol = os.linesep
495
+ with open_write(file) as o:
496
+ for res in source:
497
+ if res is None:
498
+ continue
499
+ if isinstance(res, dict):
500
+ json.dump(res, o, ensure_ascii=False)
501
+ o.write(eol)
502
+ continue
503
+ if isinstance(res, str):
504
+ res = res.rstrip("\n")
505
+ print(res, file=o)
506
+
507
+
508
+ class JsonReader(Transformer):
509
+ def __init__(self, strict: bool = False):
510
+ super().__init__()
511
+ self.ready = True
512
+ self.strict = strict
513
+ self.num_errors = 0
514
+
515
+ def do(self, line: str) -> Optional[dict]:
516
+ if line is None:
517
+ return None
518
+ if isinstance(line, dict):
519
+ return line
520
+ line = line.rstrip("\n")
521
+ if not line:
522
+ return None
523
+ try:
524
+ return json.loads(line)
525
+ except json.decoder.JSONDecodeError as e:
526
+ self.log_error(e)
527
+ if self.strict:
528
+ raise
529
+ return None
530
+
531
+ def log_error(self, e: json.decoder.JSONDecodeError):
532
+ self.num_errors += 1
533
+ if self.num_errors > 10:
534
+ return
535
+
536
+ MAX_LEN = 80
537
+ snippet, snippet_len = e.doc, len(e.doc)
538
+ col = e.pos
539
+ if snippet_len > MAX_LEN:
540
+ if col < MAX_LEN:
541
+ start = 0
542
+ elif snippet_len - col < MAX_LEN:
543
+ start = snippet_len - MAX_LEN
544
+ else:
545
+ start = col - MAX_LEN // 2
546
+ snippet = e.doc[start : start + MAX_LEN]
547
+ col = col - start
548
+ logging.warning(
549
+ "\n".join(
550
+ [
551
+ f"Invalid json (length={len(e.doc)}) {e}",
552
+ snippet,
553
+ " " * (col - 1) + "^",
554
+ ]
555
+ )
556
+ )
557
+
558
+ def summary(self):
559
+ summ = super().summary()
560
+ if self.num_errors > 0:
561
+ summ.append(f"Skipped {self.num_errors} invalid json.")
562
+ return summ
563
+
564
+
565
+ def compile_column(column, newline):
566
+ if callable(column):
567
+ return column
568
+
569
+ if column == "*":
570
+ return json.dumps
571
+
572
+ if re.match(r"[_a-z][_a-z0-9]*", column):
573
+
574
+ def extract_col(doc):
575
+ v = doc.get(column, "")
576
+ if isinstance(v, str) and newline != "\n":
577
+ v = v.rstrip("\n").replace("\n", newline)
578
+ return v
579
+
580
+ return extract_col
581
+
582
+ return compile_expr(column)
583
+
584
+
585
+ def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
586
+ """Yields the content of the requested columns."""
587
+ column_parsers = [compile_column(c, newline) for c in columns]
588
+ for doc in read_jsons(lines):
589
+ values = []
590
+ empty = True
591
+ for parse_col in column_parsers:
592
+ v = parse_col(doc)
593
+ values.append(str(v) or "")
594
+ empty = empty and v is None
595
+
596
+ if skip_empty and empty:
597
+ continue
598
+
599
+ yield separator.join(values)
600
+
601
+
602
+ def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
603
+ if not isinstance(clause, str):
604
+ return clause
605
+
606
+ args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
607
+ args_list = list(re.findall(args_re, clause))
608
+ if not args_list:
609
+ # This is only a warning because you may want to have eg random sampling
610
+ # that doesn't depend on the document.
611
+ logging.warn(
612
+ f"Warning: No variable found in expression: <{clause}>\n"
613
+ "Variables should be written inside braces, eg: {language}=='en'"
614
+ )
615
+ python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
616
+ requires = requires or []
617
+ modules = {r: importlib.import_module(r) for r in requires}
618
+ return eval(f"lambda doc: {python_like}", modules)
619
+
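+ # Worked example (illustrative): the expression below compiles to roughly
+ # `lambda doc: doc.get('language', None) == 'en'`.
+ #
+ #   keep_en = compile_expr("{language}=='en'")
+ #   keep_en({"language": "en"})   # -> True
+ #   keep_en({"language": "fr"})   # -> False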
620
+
621
+ class where(Transformer):
622
+ """Filters the data using python code.
623
+
624
+ Ex: `jsonql where 'len({text}) > 100'`
625
+ """
626
+
627
+ def __init__(
628
+ self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
629
+ ):
630
+ super().__init__()
631
+ self.raw_clauses = clauses
632
+ self.requires = requires
633
+ self.n_selected = 0
634
+ self.clauses: List[FilterFn] = []
635
+
636
+ def _prepare(self):
637
+ self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
638
+
639
+ def do(self, doc: dict) -> Optional[dict]:
640
+ assert self.clauses
641
+ if not doc or not all((c(doc) for c in self.clauses)):
642
+ return None
643
+ self.n_selected += 1
644
+ return doc
645
+
646
+ def summary(self):
647
+ n_selected, n_docs = self.n_selected, self.processed
648
+ selectivity = n_selected / n_docs if n_docs else 0
649
+ return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
650
+
651
+
652
+ def merge(lines, columns, separator="\t", newline=NEWLINE):
653
+ """Reads tab separated columns and output a json using the given headers.
654
+
655
+ Headers are of form {key}[%{type}]
656
+ {type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
657
+ Default type is string.
658
+ A special header "_" means interpret this column as json, and append all other
659
+ columns to it. Must appear only once and on last position.
660
+
661
+ Ex:
662
+ `echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
663
+ `echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
664
+ `echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
665
+ """
666
+ handle_newlines = lambda s: s.replace(newline, "\n")
667
+ type_mapping: Dict[str, Callable] = {
668
+ "f": float,
669
+ "i": int,
670
+ "b": bool,
671
+ "s": handle_newlines,
672
+ }
673
+ type_parsing = [
674
+ type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
675
+ ]
676
+ columns = [f.split("%")[0] for f in columns]
677
+ doc_index = columns.index("_") if "_" in columns else -1
678
+ read_json = JsonReader()
679
+
680
+ def parse(line):
681
+ parts = line.split(separator, len(columns) - 1)
682
+ doc: Dict[str, tp.Any] = {}
683
+ for i, value in enumerate(parts):
684
+ if columns[i] == "_":
685
+ doc.update(read_json(parts[doc_index]))
686
+ else:
687
+ try:
688
+ doc[columns[i]] = type_parsing[i](value)
689
+ except ValueError:
690
+ logging.error(
691
+ f"Error when parsing column {i} of line: {line[:100]}..."
692
+ )
693
+ return doc
694
+
695
+ for line in lines:
696
+ yield json.dumps(parse(line))
697
+
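+ # Small example (illustrative): merging one tab-separated line with a typed
+ # header gives back a json string.
+ #
+ #   list(merge(["1\thello"], ["n%i", "t"]))
+ #   # -> ['{"n": 1, "t": "hello"}']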
698
+
699
+ class split(Transformer):
700
+ """Split a files in several smaller files based on the value of a field."""
701
+
702
+ # Not parallelisable since we are writing to files.
703
+ parallelisable = False
704
+
705
+ def __init__(
706
+ self,
707
+ pattern: Union[Path, str] = None,
708
+ split_fn: Callable[[dict], str] = None,
709
+ mkdir: bool = False,
710
+ ):
711
+ super().__init__()
712
+ assert not (
713
+ pattern and split_fn
714
+ ), "split can't have both a pattern and a split_fn"
715
+ if split_fn is not None:
716
+ self.split_fn = split_fn
717
+ else:
718
+ assert pattern, "split need either a pattern or a split_fn"
719
+ self.split_fn = self.make_split_fn(str(pattern))
720
+ self.mkdir = mkdir
721
+ self.o: dict = {}
722
+
723
+ def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
724
+ candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
725
+ return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
726
+
727
+ def do(self, doc):
728
+ filename = self.split_fn(doc)
729
+ if not filename:
730
+ return
731
+ o = self.o.get(filename, None)
732
+ if o is None:
733
+ if self.mkdir:
734
+ Path(filename).parent.mkdir(parents=True, exist_ok=True)
735
+ self.o[filename] = open_write(filename)
736
+ print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
737
+
738
+ def summary(self):
739
+ summ = super().summary()
740
+ summ.append(f"Found {len(self.o)} splits.")
741
+ return summ
742
+
743
+ def close(self):
744
+ for file in self.o.values():
745
+ file.close()
746
+
747
+
748
+ def histogram(values, bins, weights):
749
+ hist, bins = np.histogram(values, bins=bins)
750
+ # n_bins = len(hist)
751
+
752
+ if weights is not None:
753
+ # Bins can't be auto-determined if weights is supplied.
754
+ # So we first compute the bins without the weights then recompute
755
+ # the histogram with the weights.
756
+ hist, bins = np.histogram(values, bins=bins, weights=weights)
757
+ # cumsum = np.cumsum(hist)
758
+ # total = cumsum[-1]
759
+
760
+ # for i in range(n_bins - 1):
761
+ # if cumsum[i] / total > 0.9:
762
+ # useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
763
+ # new_bins = np.append(useful_range, [bins[-1]])
764
+ # return np.histogram(values, bins=new_bins, weights=weights)
765
+
766
+ return hist, bins
767
+
768
+
769
+ def _parse_bins(bins):
770
+ try:
771
+ if isinstance(bins, str):
772
+ if "," in bins:
773
+ bins = [int(b) for b in bins.split(",")]
774
+ else:
775
+ bins = int(bins)
776
+ except ValueError:
777
+ pass
778
+ return bins
779
+
780
+
781
+ ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
782
+ MAX_LABEL_LEN = 100
783
+
784
+
785
+ def bar_chart(hist, bins):
786
+ n = sum(hist)
787
+ max_h = max(hist)
788
+ out = []
789
+ for i, h in enumerate(hist):
790
+ h_size = 80 * h // max_h
791
+ dh_size = 80 * (h - hist[i - 1]) // max_h
792
+ if h_size == 0 or dh_size == 0:
793
+ continue
794
+ bar = "█" * h_size
795
+ out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
796
+ out.append(f"{bins[-1]:8.3f}")
797
+ return out
798
+
799
+
800
+ def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
801
+ out = []
802
+ documents = stats[ALL_DOCUMENTS]
803
+ count = stats.get(key, 0)
804
+ r = count / documents if documents else 0
805
+ out.append(f"Field {key} saw {count} times ({r:5.1%})")
806
+
807
+ length = stats.get(key + ".length", None)
808
+ avg_length = length // count if length else 0
809
+ if length is not None:
810
+ out[-1] += f", average length is {length // count}"
811
+
812
+ values = stats.get(key + ".val", None)
813
+ if values:
814
+ out[-1] += f", histogram is: (bins={bins})"
815
+ if weights:
816
+ if weights not in stats:
817
+ logging.warn(f"Warning: weights column {weights} not found.")
818
+ if weights + ".val" not in stats:
819
+ logging.warn(
820
+ f"Warning: weights column {weights} is not a numeric column."
821
+ )
822
+ weights = stats.get(weights + ".val")
823
+ hist, bins = histogram(values, _parse_bins(bins), weights)
824
+ if cumulative:
825
+ hist = np.cumsum(hist)
826
+ out += bar_chart(hist, bins)
827
+
828
+ cnt = stats.get(key + ".cnt", None)
829
+ if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
830
+ cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
831
+ out[-1] += ", top 100 labels:"
832
+ for label, n in cnt[:100]:
833
+ if n < 5:
834
+ continue
835
+ out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
836
+
837
+ return out
838
+
839
+
840
+ def describe(source, columns=None, weights=None, **kwargs):
841
+ """Compute some statistics about a dataset.
842
+
843
+ Stats can be restricted to a subset of columns."""
844
+ MAX_HIST_SIZE = 100_000_000
845
+ MAX_CNT_SIZE = 1000
846
+ stats = {ALL_DOCUMENTS: 0}
847
+ needed = columns + [weights] if columns else None
848
+
849
+ for doc in read_jsons(source):
850
+ stats[ALL_DOCUMENTS] += 1
851
+ for k, v in doc.items():
852
+ if needed and k not in needed:
853
+ continue
854
+ stats[k] = get_or_set(stats, k, 0) + 1
855
+ if isinstance(v, str):
856
+ stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
857
+ if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
858
+ continue
859
+ cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
860
+ if v in cnt or len(cnt) < MAX_CNT_SIZE:
861
+ cnt[v] += 1
862
+ elif type(v) in (int, float):
863
+ values = get_or_set(stats, k + ".val", [])
864
+ if len(values) < MAX_HIST_SIZE:
865
+ values.append(v)
866
+ elif type(v) is list and len(v) and type(v[0]) in (int, float):
867
+ values = get_or_set(stats, k + ".val", [])
868
+ if len(values) < MAX_HIST_SIZE:
869
+ values += v
870
+ elif type(v) is dict:
871
+ cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
872
+ for label in v:
873
+ if label in cnt or len(cnt) < MAX_CNT_SIZE:
874
+ cnt[label] += 1
875
+
876
+ documents = stats[ALL_DOCUMENTS]
877
+ yield f"Stats computed on {documents} documents:"
878
+ for k in stats:
879
+ if columns and k not in columns:
880
+ continue
881
+ if "." in k or k == ALL_DOCUMENTS:
882
+ continue
883
+ for line in display_stats(stats, k, weights=weights, **kwargs):
884
+ yield line
885
+
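+ # Usage sketch (illustrative): `describe` consumes json lines and yields
+ # human-readable report lines; the document below is made up. An iterator is
+ # used because `open_read` treats lists as lists of paths.
+ #
+ #   for line in describe(iter(['{"language": "en", "length": 120}'])):
+ #       print(line)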
886
+
887
+ def shard(lines):
888
+ """Shard a file in several smaller ones."""
889
+ # The creation of the shards is handled in a generic way. Do we need this?
890
+ return lines
891
+
892
+
893
+ # *** Utils ***
894
+
895
+
896
+ def get_or_set(dictionary, key, default):
897
+ if key not in dictionary:
898
+ dictionary[key] = default
899
+ return dictionary[key]
900
+
901
+
902
+ class SimpleIO(Protocol):
903
+ """A subset of methods from TextIO."""
904
+
905
+ def close(self) -> None:
906
+ ...
907
+
908
+ def write(self, line: str) -> int:
909
+ ...
910
+
911
+ def __enter__(self) -> "SimpleIO":
912
+ ...
913
+
914
+ def __exit__(self, exc_type, exc_value, traceback):
915
+ ...
916
+
917
+
918
+ def open_read(filename: ReadableFileLike) -> Iterable[str]:
919
+ """Open the given file, list of files or files matching the given glob and read lines.
920
+
921
+ `filename` is None or "-" -> reads from stdin
922
+ `filename` is a Path / str -> interprets filename as a glob and open files matching it
923
+ `filename` is a list -> opens sequentially all files from the list using `open_read`
924
+ `filename` is something else -> returns the object wrapped in a `nullcontext`
925
+ This allows passing already opened files or iterables.
926
+
927
+ `open_read` will decompress gzip files, given they have ".gz" suffix.
928
+ """
929
+ if filename is None:
930
+ return sys.stdin
931
+
932
+ if isinstance(filename, list):
933
+ assert isinstance(filename[0], Path)
934
+ if len(filename) == 0:
935
+ return []
936
+ if len(filename) > 1:
937
+ return _yield_from(filename)
938
+ filename = tp.cast(Path, filename[0])
939
+ if isinstance(filename, str):
940
+ if filename.startswith("http://") or filename.startswith("https://"):
941
+ return open_remote_file(filename)
942
+
943
+ filename = Path(filename)
944
+ if not isinstance(filename, Path):
945
+ # we might have received an iterable, return it unmodified.
946
+ return filename # type: ignore
947
+
948
+ # Expand glob patterns only when reading
949
+ files = [Path(f) for f in sorted(glob.glob(str(filename)))]
950
+ if len(files) > 1:
951
+ return _yield_from(files)
952
+ if len(files) == 1:
953
+ filename = files[0]
954
+
955
+ assert isinstance(filename, Path)
956
+
957
+ if filename.name.endswith("]"):
958
+ return block_reader(filename)
959
+
960
+ logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
961
+ if filename.suffix == ".gz":
962
+ file: TextIO = gzip.open(filename, "rt") # type: ignore
963
+ else:
964
+ file = open(filename, "rt")
965
+
966
+ return _close_when_exhausted(file)
967
+
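+ # Usage sketch (illustrative): the glob below is a hypothetical path; matching
+ # .gz files are decompressed transparently and their lines chained together.
+ #
+ #   for line in open_read("data/shards/*.json.gz"):
+ #       ...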
968
+
969
+ def _close_when_exhausted(file: TextIO) -> Iterable[str]:
970
+ with file:
971
+ yield from file
972
+
973
+
974
+ def _yield_from(files: list) -> Iterable[str]:
975
+ for file in files:
976
+ yield from open_read(file)
977
+
978
+
979
+ def open_write(
980
+ filename: WritableFileLike, max_size: str = "4G"
981
+ ) -> tp.ContextManager[TextIO]:
982
+ """Open the given file, list of files or files matching the given glob.
983
+
984
+ The return value is a ContextManager meant to be used inside a `with` block:
985
+ ```
986
+ with open_write("foo.txt") as o:
987
+ ...
988
+
989
+ Write mode:
990
+ replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`.
991
+ If filename ends with ".gz", creates a blocked gzip file with random access.
992
+ """
993
+ if filename is None:
994
+ return contextlib.nullcontext(sys.stdout)
995
+
996
+ if isinstance(filename, list):
997
+ if len(filename) > 1:
998
+ return MultiFile(filename, "w", max_size)
999
+ else:
1000
+ filename = tp.cast(Path, filename[0])
1001
+ if isinstance(filename, str):
1002
+ filename = Path(filename)
1003
+ if not isinstance(filename, Path):
1004
+ assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
1005
+ # We return a 'TextIO' even though we only check for `.write` method,
1006
+ # this works better with eg `print`.
1007
+ return contextlib.nullcontext(tp.cast(TextIO, filename))
1008
+
1009
+ mode = "wt"
1010
+ if "?" in filename.name:
1011
+ return sharded_file(filename, mode, max_size)
1012
+
1013
+ logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
1014
+ # TODO: should we use another format ?
1015
+ if filename.suffix == ".gz":
1016
+ return BlockedGzipWriter(Path(filename), mode, block_size="64M")
1017
+
1018
+ return open(filename, "wt")
1019
+
1020
+
1021
+ def parse_size(size):
1022
+ unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
1023
+ unit = size[-1].upper()
1024
+ assert (
1025
+ unit in unit_map
1026
+ ), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
1027
+ return int(size[:-1]) * unit_map[unit]
1028
+
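+ # Example (illustrative): sizes use binary units, so "4G" is 4 * 1024**3 bytes.
+ #
+ #   assert parse_size("4G") == 4 * 1024 ** 3
+ #   assert parse_size("64M") == 64 * 1024 ** 2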
1029
+
1030
+ class MultiFile(SimpleIO):
1031
+ def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
1032
+ self.name = str(files)
1033
+ self.mode = mode
1034
+ self.files = iter(files)
1035
+ self.max_size = parse_size(max_size)
1036
+ self.current_handle: Optional[TextIO] = None
1037
+ self.current_block_size = 0
1038
+ self._open_next_handle() # Opening 1st handle allows to write directly.
1039
+
1040
+ def write(self, content) -> int:
1041
+ # Avoid splitting newlines to a new file.
1042
+ # use current_block_size since it's faster than `tell()`
1043
+ if content != "\n" and self.current_block_size >= self.max_size:
1044
+ self._open_next_handle()
1045
+ if self.current_handle is None:
1046
+ raise Exception("No more files to write to...")
1047
+
1048
+ written = self.current_handle.write(content)
1049
+ self.current_block_size += written
1050
+ return written
1051
+
1052
+ def _open_next_handle(self) -> bool:
1053
+ self.close()
1054
+ file = next(self.files, None)
1055
+ if file is None:
1056
+ return False
1057
+
1058
+ self.current_handle = open_write(file).__enter__()
1059
+ self.current_block_size = 0
1060
+ return True
1061
+
1062
+ def __enter__(self):
1063
+ return self
1064
+
1065
+ def __exit__(self, *exc_info):
1066
+ self.close()
1067
+
1068
+ @property
1069
+ def closed(self):
1070
+ return self.current_handle is None
1071
+
1072
+ def close(self):
1073
+ if self.current_handle is None:
1074
+ return
1075
+
1076
+ # log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
1077
+ self.current_handle.__exit__(None, None, None)
1078
+ self.current_handle = None
1079
+
1080
+
1081
+ # Not sure it helps since connections are reset anyway.
1082
+ _session = functools.lru_cache()(requests.Session)
1083
+
1084
+
1085
+ def request_get_content(url: str, n_retry: int = 3) -> bytes:
1086
+ """Retrieve the binary content at url.
1087
+
1088
+ Retry on connection errors.
1089
+ """
1090
+ t0 = time.time()
1091
+ logging.info(f"Starting download of {url}")
1092
+ for i in range(1, n_retry + 1):
1093
+ try:
1094
+ r = _session().get(url)
1095
+ r.raise_for_status()
1096
+ break
1097
+ except requests.exceptions.RequestException as e:
1098
+ # Sleep and try again on error, unless it's a 404.
1099
+ message = e.args[0] if isinstance(e.args[0], str) else ""
1100
+ if i == n_retry or "Client Error" in message:
1101
+ raise e
1102
+ warnings.warn(
1103
+ f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
1104
+ )
1105
+ time.sleep(10 * 2 ** i)
1106
+ dl_time = time.time() - t0
1107
+ dl_speed = len(r.content) / dl_time / 1024
1108
+ logging.info(
1109
+ f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
1110
+ )
1111
+ return r.content
1112
+
1113
+
1114
+ def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
1115
+ """Download the files at the given url to memory and opens it as a file.
1116
+ Assumes that the file is small, and fetch it when this function is called.
1117
+ """
1118
+ if cache and cache.exists():
1119
+ return open_read(cache)
1120
+
1121
+ # TODO: open the remote file in streaming mode.
1122
+ # The hard part is that we need to write the content on disk at the same time,
1123
+ # to implement disk caching.
1124
+ raw_bytes = request_get_content(url)
1125
+ content = io.BytesIO(raw_bytes)
1126
+ if url.endswith(".gz"):
1127
+ f: TextIO = gzip.open(content, mode="rt") # type: ignore
1128
+ else:
1129
+ f = io.TextIOWrapper(content)
1130
+
1131
+ if cache and not cache.exists():
1132
+ # The file might have been created while downloading/writing.
1133
+ tmp_cache = _tmp(cache)
1134
+ tmp_cache.write_bytes(raw_bytes)
1135
+ if not cache.exists():
1136
+ tmp_cache.replace(cache)
1137
+ else:
1138
+ tmp_cache.unlink()
1139
+
1140
+ return _close_when_exhausted(f)
1141
+
1142
+
1143
+ def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
1144
+ folder, name = file_pattern.parent, file_pattern.name
1145
+ assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
1146
+
1147
+ n = name.count("?")
1148
+ assert 0 < n < 8
1149
+ assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
1150
+ assert "r" not in mode
1151
+ files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
1152
+
1153
+ return MultiFile(files, mode, max_size)
1154
+
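+ # Example (illustrative, hypothetical file name): a pattern with two adjacent
+ # '?' expands to up to 100 numbered shards, each opened lazily once the
+ # previous one reaches `max_size`.
+ #
+ #   out = sharded_file(Path("dump.??.json"), mode="w", max_size="1G")
+ #   # writes to dump.00.json, then dump.01.json, ... as needed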
1155
+
1156
+ class SplitFile:
1157
+ def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
1158
+ assert mode == "r"
1159
+ size = os.path.getsize(filename)
1160
+ self.handle = open(filename, mode)
1161
+ start = chunk * size // n_chunks
1162
+ self.end: int = (chunk + 1) * size // n_chunks
1163
+
1164
+ if start > 0:
1165
+ self.handle.seek(start - 1)
1166
+ # Skip incomplete line. This avoids crashing when reading eg the middle
1167
+ # of a unicode char. `self.handle.buffer` is a binary file reader.
1168
+ self.handle.buffer.readline() # type: ignore
1169
+
1170
+ def __enter__(self):
1171
+ return self
1172
+
1173
+ def __iter__(self):
1174
+ while True:
1175
+ line = self.handle.readline()
1176
+ if not line:
1177
+ return
1178
+
1179
+ yield line
1180
+ if self.handle.tell() >= self.end:
1181
+ return
1182
+
1183
+ def readlines(self):
1184
+ return list(self.__iter__())
1185
+
1186
+ def close(self):
1187
+ self.handle.close()
1188
+
1189
+ def __exit__(self, *args):
1190
+ self.close()
1191
+
1192
+
1193
+ def get_block_readers(filename: Path, n_readers, mode="t"):
1194
+ index_filename = filename.parent / (filename.name + ".index")
1195
+ if not index_filename.exists():
1196
+ return [gzip.open(filename, "r" + mode)]
1197
+ index: List[int] = np.load(index_filename)
1198
+ n_chunks = len(index)
1199
+ chunk_per_reader = int(np.ceil(n_chunks / n_readers))
1200
+ n_readers = int(np.ceil(n_chunks / chunk_per_reader))
1201
+
1202
+ start = 0
1203
+ readers = []
1204
+ for i in range(n_readers):
1205
+ end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
1206
+ r = _blocked_gzip_reader(filename, start, end, mode)
1207
+ readers.append(r)
1208
+ start = end
1209
+ return readers
1210
+
1211
+
1212
+ def block_reader(filename: Path) -> Iterable[str]:
1213
+ root, pattern = str(filename)[:-1].split("[", 1)
1214
+ assert root.endswith(".gz"), "Can only read block of a .gz file for now."
1215
+
1216
+ ii, nn = pattern.strip().split("/")
1217
+ i, n_readers = int(ii), int(nn)
1218
+
1219
+ index_filename = root + ".index"
1220
+ assert os.path.exists(
1221
+ index_filename
1222
+ ), f"Index {index_filename} not found for {filename}"
1223
+ index: List[int] = np.load(index_filename)
1224
+ n_chunks = len(index)
1225
+ chunk_per_reader = int(np.ceil(n_chunks / n_readers))
1226
+ n_readers = int(np.ceil(n_chunks / chunk_per_reader))
1227
+ # I'm not sure how to handle the case where there are fewer readers than expected.
1228
+ # Currently we return empty readers.
1229
+
1230
+ start = 0
1231
+ if i > 0:
1232
+ start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
1233
+ end = index[min(i * chunk_per_reader, n_chunks - 1)]
1234
+ return _blocked_gzip_reader(root, start, end, mode="t")
1235
+
1236
+
1237
+ def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
1238
+ handle = gzip.open(filename, "r" + mode)
1239
+ handle.seek(start)
1240
+ try:
1241
+ while handle.tell() < end:
1242
+ line = handle.readline()
1243
+ if not line:
1244
+ break
1245
+ yield line
1246
+ finally:
1247
+ handle.close()
1248
+
1249
+
1250
+ class BlockedGzipWriter(MultiFile):
1251
+ """Writes a Gzip files which can be read by block.
1252
+
1253
+ Decreasing the block size may hurt compression, but provides more split points.
1254
+ """
1255
+
1256
+ def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
1257
+ assert "w" in mode
1258
+ self.filename = Path(filename)
1259
+ self.index: List[int] = []
1260
+ self.zipfile: Optional[gzip.GzipFile] = None
1261
+ super().__init__([], mode, block_size)
1262
+
1263
+ def _open_next_handle(self) -> bool:
1264
+ """Here we never actually close/open handles,
1265
+ we just write the end of block sequence."""
1266
+ if not self.current_handle:
1267
+ mode = self.mode + "t"
1268
+ self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
1269
+ assert isinstance(self.current_handle.buffer, gzip.GzipFile)
1270
+ self.zipfile = self.current_handle.buffer
1271
+ return True
1272
+
1273
+ # Use Z_FULL_FLUSH to allow random access:
1274
+ # https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
1275
+ self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
1276
+ self.index.append(self.current_handle.tell())
1277
+ self.current_block_size = 0
1278
+ return True
1279
+
1280
+ def flush(self):
1281
+ assert self.current_handle is not None
1282
+ self.current_handle.flush()
1283
+
1284
+ def close(self):
1285
+ if self.current_handle is None:
1286
+ return
1287
+ self.current_handle.flush()
1288
+ self.index.append(self.current_handle.tell())
1289
+ self.current_handle.close()
1290
+ self.current_handle = None
1291
+ index = np.array(self.index, dtype=np.uint64)
1292
+ with open(str(self.filename) + ".index", "wb") as o:
1293
+ np.save(o, index)
1294
+
1295
+
1296
+ def grouper(iterable, n):
1297
+ group = []
1298
+ for x in iterable:
1299
+ group.append(x)
1300
+ if len(group) == n:
1301
+ yield group
1302
+ group = []
1303
+ if group:
1304
+ yield group
1305
+
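+ # Example (illustrative): the last group may be shorter.
+ #
+ #   assert list(grouper(range(5), 2)) == [[0, 1], [2, 3], [4]]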
1306
+
1307
+ PROCESS = psutil.Process()
1308
+
1309
+
1310
+ def mem_footprint_gb(pid=None):
1311
+ rss = PROCESS.memory_info().rss
1312
+ return rss / 1_000_000_000
1313
+
1314
+
1315
+ def _tmp(output: Path) -> Path:
1316
+ suffix = "".join(output.suffixes)
1317
+ suffix = ".tmp" + suffix
1318
+ prefix = output.name[: -len(suffix)]
1319
+ _, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
1320
+ return Path(tmp_path)
1321
+
1322
+
1323
+ @functools.lru_cache()
1324
+ def _tmp_dir() -> Path:
1325
+ job_id = os.environ.get("SLURM_JOB_ID")
1326
+ if job_id:
1327
+ return Path("/scratch/slurm_tmpdir") / job_id
1328
+
1329
+ checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
1330
+ if checkpoint.exists():
1331
+ tmp = checkpoint / "tmp"
1332
+ tmp.mkdir(exist_ok=True)
1333
+ return tmp
1334
+
1335
+ return Path("/tmp")
1336
+
1337
+
1338
+ if __name__ == "__main__":
1339
+ multiprocessing.set_start_method("fork")
1340
+ main(sys.argv[1:])
cc-multilingual-main/cc_net/cc_net/mine.py ADDED
@@ -0,0 +1,648 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ #
6
+
7
+ """
8
+ Main script to download a CC dump, remove duplicates, split by language and
9
+ filter the documents.
10
+
11
+ The pipeline parameters are described in the `Config` class.
12
+ """
13
+
14
+ import hashlib
15
+ import json
16
+ import time
17
+ import warnings
18
+ from argparse import ArgumentParser
19
+ from collections import defaultdict
20
+ from itertools import repeat
21
+ from pathlib import Path
22
+ from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
23
+
24
+ import func_argparse
25
+
26
+ # Local scripts
27
+ from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
28
+ from cc_net import regroup as regroup_module
29
+ from cc_net import split_by_lang
30
+ from cc_net.execution import Executor
31
+
32
+ # Constant
33
+ FILE_DIR = Path(__file__).parent
34
+ CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
35
+
36
+ DEFAULT_PIPELINE = [
37
+ # "dedup",
38
+ "lid",
39
+ "keep_lang",
40
+ "sp",
41
+ "lm",
42
+ "pp_bucket",
43
+ "drop",
44
+ "split_by_lang",
45
+ ]
46
+
47
+
48
+ class Config(NamedTuple):
49
+ """
50
+ Mine Common Crawl with the given settings.
51
+
52
+ config_name
53
+ dump: CC dump id
54
+ output_dir: working directory
55
+ mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
56
+ execution: choose how to parallelize the execution
57
+ num_shards: number of shards to split the dump
58
+ num_segments_per_shard: allows downloading a small portion of CC (eg for tests)
59
+ min_len: remove documents shorter than this (in chars)
60
+ hash_in_mem: number of shard hashes to use for dedup
61
+ lang_whitelist: only treat those languages
62
+ lang_blacklist: ignore those languages
63
+ lang_threshold: remove docs whose top language score is lower than this
64
+ keep_bucket: keep only the listed perplexity buckets, chosen from (head, middle, tail, all)
65
+ lm_dir: folder containing LMs
66
+ lm_languages: only use LMs for the following languages
67
+ cutoff: cutoff file to use for split in head/middle/tail
68
+ mine_num_processes: number of processes to use for mining
69
+ target_size: size of the final files produced during the `regroup` stage
70
+ cleanup_after_regroup: delete intermediary files after regroup
71
+ task_parallelism: max number of tasks to run in parallel
72
+ pipeline: restricts the mining pipeline to the given steps. Order is important!
73
+ experiments: (HACK) enable specific experiments in the code
74
+ """
75
+
76
+ config_name: str = "base"
77
+ dump: str = "2017-51"
78
+ output_dir: Path = Path("data")
79
+ mined_dir: str = "mined"
80
+ execution: str = "auto"
81
+ num_shards: int = 1600
82
+ num_segments_per_shard: int = -1
83
+ metadata: Optional[str] = None
84
+ min_len: int = 300
85
+ hash_in_mem: int = 50
86
+ lang_whitelist: Sequence[str] = ['hi']
87
+ lang_blacklist: Sequence[str] = []
88
+ lang_threshold: float = 0.5
89
+ keep_bucket: Sequence[str] = []
90
+ lm_dir: Path = Path("data/lm_sp")
91
+ cutoff: Path = CUTOFF_CSV
92
+ lm_languages: Optional[Sequence[str]] = None
93
+ mine_num_processes: int = 16
94
+ target_size: str = "4G"
95
+ cleanup_after_regroup: bool = False
96
+ task_parallelism: int = -1
97
+ pipeline: Sequence[str] = DEFAULT_PIPELINE
98
+ experiments: Sequence[str] = []
99
+ cache_dir: Optional[Path] = None
100
+
101
+ def get_executor(
102
+ self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
103
+ ) -> Executor:
104
+ name = "_".join((name, self.config_name, *self.experiments))
105
+ return execution.get_executor(
106
+ name,
107
+ self.output_dir / "logs",
108
+ self.execution,
109
+ timeout_hour=timeout_hour,
110
+ mem_gb=mem_gb,
111
+ cpus=cpus,
112
+ task_parallelism=self.task_parallelism,
113
+ )
114
+
115
+ def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
116
+ dump_cache: Optional[Path] = None
117
+ if self.cache_dir:
118
+ self.cache_dir.mkdir(exist_ok=True)
119
+ dump_cache = self.cache_dir / self.dump
120
+ dump_cache.mkdir(exist_ok=True)
121
+
122
+ return process_wet_file.CCShardReader(
123
+ self.dump,
124
+ shard=shard,
125
+ num_shards=self.num_shards,
126
+ num_segments_per_shard=self.num_segments_per_shard,
127
+ min_len=self.min_len,
128
+ cache_dir=dump_cache,
129
+ )
130
+
131
+ @classmethod
132
+ def from_json(cls, json_file: Path) -> "Config":
133
+ raw_lines = json_file.read_text().splitlines()
134
+ raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
135
+ json_config = json.loads("".join(raw_lines))
136
+ path_keys = ["cache_dir", "lm_dir", "output_dir"]
137
+ for key in path_keys:
138
+ if key in json_config:
139
+ json_config[key] = Path(json_config[key])
140
+ return Config(**json_config)
141
+
142
+ @property
143
+ def will_split(self) -> bool:
144
+ return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
145
+
146
+ def get_lm_languages(self) -> Sequence[str]:
147
+ if self.lm_languages is not None:
148
+ return self.lm_languages
149
+
150
+ if self.lang_whitelist:
151
+ return self.lang_whitelist
152
+
153
+ languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
154
+ if self.lang_blacklist:
155
+ languages = [l for l in languages if l not in self.lang_blacklist]
156
+ return languages
157
+
158
+ def get_mined_dir(self, regroup: bool = False) -> Path:
159
+ if self.will_split and not regroup:
160
+ return self.output_dir / f"{self.mined_dir}_split" / self.dump
161
+ return self.output_dir / self.mined_dir / self.dump
162
+
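+ # Configuration sketch (illustrative values only, not one of the predefined
+ # configs below): a small local run could be described directly, or loaded
+ # from a json file with `Config.from_json`. The shard count and language list
+ # here are placeholders.
+ #
+ #   demo = Config(
+ #       config_name="demo",
+ #       num_shards=4,
+ #       num_segments_per_shard=1,
+ #       lang_whitelist=["hi"],
+ #   )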
163
+
164
+ BASE_CONFIG = Config()
165
+
166
+ BYLANG_CONFIG = Config(
167
+ config_name="by_lang",
168
+ mined_dir="mined_by_lang",
169
+ pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
170
+ )
171
+
172
+ REPRODUCE_CONFIG = Config(
173
+ config_name="reproduce",
174
+ dump="2019-09",
175
+ mined_dir="reproduce",
176
+ pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
177
+ metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
178
+ # Optional filtering:
179
+ # It won't change the execution speed much, but decreases the disk requirement.
180
+ # Restrict languages
181
+ lang_whitelist=["fr"],
182
+ # Restrict perplexity buckets
183
+ # Top languages have been split in perplexity buckets according
184
+ # to a Wikipedia trained LM.
185
+ # The buckets from low perplexity (good) to high (bad) are:
186
+ # ["head", "middle", "tail"]
187
+ # Languages without a LM have only one bucket "all".
188
+ # It won't change the execution speed much, but decreases the disk requirement.
189
+ keep_bucket=["head", "all"],
190
+ mine_num_processes=1,
191
+ )
192
+
193
+ TEST_CONFIG = BASE_CONFIG._replace(
194
+ config_name="test",
195
+ dump="2019-09",
196
+ output_dir=Path("test_data"),
197
+ execution="local",
198
+ num_shards=4,
199
+ num_segments_per_shard=1,
200
+ hash_in_mem=2,
201
+ mine_num_processes=2,
202
+ lang_whitelist=["de", "it", "fr"],
203
+ target_size="32M",
204
+ cleanup_after_regroup=False,
205
+ cache_dir=Path("test_data/wet_cache"),
206
+ )
207
+
208
+ PREDEF_CONFIGS = {
209
+ "base": BASE_CONFIG,
210
+ "by_lang": BYLANG_CONFIG,
211
+ "test": TEST_CONFIG,
212
+ "test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
213
+ "debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
214
+ "reproduce": REPRODUCE_CONFIG,
215
+ "augment": BASE_CONFIG._replace(
216
+ config_name="augment", dump="2019-13", lang_blacklist=["en"]
217
+ ),
218
+ }
219
+
220
+
221
+ def tmp(output: Path) -> Path:
222
+ return output.parent / (output.stem + ".tmp" + output.suffix)
223
+
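+ # Worked example (illustrative path): the temporary name keeps the final
+ # suffix so downstream readers still see the right extension.
+ #
+ #   tmp(Path("mined/0000.json.gz"))  # -> Path("mined/0000.json.tmp.gz")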
224
+
225
+ def finalize(tmp_output: Path, output: Path) -> None:
226
+ if not tmp_output.exists():
227
+ warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
228
+ return
229
+
230
+ tmp_index = tmp_output.parent / (tmp_output.name + ".index")
231
+ tmp_output.rename(output)
232
+
233
+ if tmp_index.exists():
234
+ tmp_index.rename(output.parent / (output.name + ".index"))
235
+
236
+
237
+ def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
238
+ if n < 0:
239
+ n = len(iterable[0])
240
+ columns: tuple = tuple([] for _ in range(n))
241
+ for row in iterable:
242
+ assert len(row) == n, f"Found tuple of len({len(row)}), expected {n}: {row}"
243
+ for i in range(n):
244
+ columns[i].append(row[i])
245
+
246
+ return columns
247
+
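+ # Worked example (illustrative): rows of (shard, output) pairs become two
+ # parallel lists, which is the shape the executor calls below expect.
+ #
+ #   assert _transpose([(0, "a"), (1, "b")]) == ([0, 1], ["a", "b"])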
248
+
249
+ def hashes(conf: Config) -> List[Path]:
250
+ """Computes hashes for each shard."""
251
+
252
+ hashes_dir = conf.output_dir / "hashes" / conf.dump
253
+ outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
254
+ missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
255
+
256
+ if not missing_outputs:
257
+ return outputs
258
+
259
+ hashes_dir.mkdir(parents=True, exist_ok=True)
260
+ # With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
261
+ # overhead due to how the dynamic allocation works.
262
+ ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
263
+ ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
264
+
265
+ # Wait a bit so that files appear on the disk.
266
+ time.sleep(20)
267
+ assert all(o.exists() for o in outputs)
268
+ return outputs
269
+
270
+
271
+ def _hashes_shard(conf: Config, shard: int, output: Path):
272
+ tmp_output = tmp(output)
273
+ jsonql.run_pipes(
274
+ dedup.HashesCollector(field="raw_content", output=tmp_output),
275
+ inputs=conf.get_cc_shard(shard),
276
+ )
277
+ finalize(tmp_output, output)
278
+ return f"Hashed {output}"
279
+
280
+
281
+ HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
282
+
283
+
284
+ def mine(conf: Config) -> List[Path]:
285
+ """Remove dups, run LID and LMs, and split by lang and quality."""
286
+ mined_dir = conf.get_mined_dir()
287
+ if conf.will_split:
288
+ # Give directories when splitting
289
+ outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
290
+ else:
291
+ # Files otherwise
292
+ outputs = [
293
+ mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards)
294
+ ]
295
+
296
+ if "mini_again" in conf.experiments:
297
+ mined_dir = conf.output_dir / "mini_again" / conf.dump
298
+ outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
299
+
300
+ # TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
301
+ mem_gb = 60 + 1 * conf.hash_in_mem
302
+ timeout_hour = 5
303
+ if "hashes" in conf.experiments:
304
+ # HACK: used for generating paper figures
305
+ outputs = [
306
+ conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
307
+ for h in HASHES_IN_MEM
308
+ ]
309
+ mem_gb = int(max(HASHES_IN_MEM) * 1.2)
310
+ timeout_hour = 8
311
+
312
+ missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
313
+
314
+ if "mini_again" in conf.experiments:
315
+ missing_outputs = [
316
+ (shard, o)
317
+ for shard, o in enumerate(outputs)
318
+ if shard in [5, 139] and not o.exists()
319
+ ]
320
+
321
+ if not missing_outputs:
322
+ return outputs
323
+
324
+ mined_dir.mkdir(parents=True, exist_ok=True)
325
+ ex = conf.get_executor(
326
+ f"mine_{conf.dump}",
327
+ mem_gb=mem_gb,
328
+ timeout_hour=timeout_hour,
329
+ cpus=conf.mine_num_processes + 1,
330
+ )
331
+
332
+ # Compute hashes firsts.
333
+ if "dedup" in conf.pipeline:
334
+ hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
335
+ hashes_files: Iterable[List[Path]] = [
336
+ hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
337
+ ]
338
+ else:
339
+ hashes_files = repeat([])
340
+
341
+ ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
342
+
343
+ assert all(o.exists() for o in outputs)
344
+ return outputs
345
+
346
+
347
+def _get_segment(tmp_output: Path, doc: dict) -> str:
+    segment: str = doc["cc_segment"].split("/")[-1]
+    return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
+
+
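_get_segment keeps only the segment's file name from the document's cc_segment field and swaps the WET extension for .json.gz, which is what lets the split_by_segment step write one output file per Common Crawl segment. An illustration, assuming _get_segment is in scope; the segment and output paths are illustrative but follow the usual WET layout:

    from pathlib import Path

    doc = {"cc_segment": "crawl-data/CC-MAIN-2019-09/segments/1550247479101.30/wet/CC-MAIN-20190215183319-20190215205319-00000.warc.wet.gz"}
    print(_get_segment(Path("mined/2019-09/0000.tmp"), doc))
    # mined/2019-09/0000.tmp/CC-MAIN-20190215183319-20190215205319-00000.json.gz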
+def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
+    assert conf.pipeline
+    tmp_output = tmp(output)
+    if "hashes" in conf.experiments:
+        # HACK: used for generating paper figures
+        hashes_in_mem = shard
+        hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
+        shard = 0
+    cc_shard = conf.get_cc_shard(shard)
+
+    steps: Dict[str, Optional[jsonql.Transformer]] = {}
+    lang_id = Path("bin") / "lid.bin"
+    steps["lid_before_dedup"] = split_by_lang.Classifier(
+        model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
+    )
+    steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
+
+    steps["lid"] = split_by_lang.Classifier(
+        model=lang_id,
+        field="raw_content",
+        out_field="language",
+        top=1,
+        threshold=conf.lang_threshold,
+    )
+    steps["lid_after_dedup"] = split_by_lang.Classifier(
+        model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
+    )
+
+    if conf.lang_blacklist:
+        steps["keep_lang"] = jsonql.where(
+            [lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
+        )
+    elif conf.lang_whitelist:
+        steps["keep_lang"] = jsonql.where(
+            [lambda doc: doc.get("language") in set(conf.lang_whitelist)]
+        )
+    else:
+        steps["keep_lang"] = None
+
+    tok_field = "tokenized"
+    steps["sp"] = perplexity.MultiSentencePiece(
+        {l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
+        field="raw_content",
+        output_field=tok_field,
+        normalize=True,
+    )
+    steps["lm"] = perplexity.DocLM(
+        {l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
+        field=tok_field,
+        output_field="perplexity",
+        normalize=False,  # Normalization is done before SentencePiece
+        # load_method=kenlm.LoadMethod.PARALLEL_READ,
+    )
+    steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
+    steps["drop"] = perplexity.DropKeys(tok_field)
+
+    steps["keep_bucket"] = None
+    if conf.keep_bucket:
+        steps["keep_bucket"] = jsonql.where(
+            [lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
+        )
+
+    if "fetch_metadata" in conf.pipeline:
+        # TODO: better default
+        assert conf.metadata is not None
+        steps["fetch_metadata"] = minify.MetadataFetcher(
+            f"{conf.metadata}/{conf.dump}/"
+        )
+
+    steps["minify"] = minify.Minifier()
+
+    pattern = str(tmp_output / "{language}_{bucket}.json.gz")
+    steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
+
+    steps["split_by_segment"] = jsonql.split(
+        split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
+    )
+
+    pipeline = filter(None, (steps[s] for s in conf.pipeline))
+
+    jsonql.run_pipes(
+        *pipeline,
+        inputs=cc_shard,
+        processes=conf.mine_num_processes,
+        chunksize=100,
+        # The splitter takes care of writing to files.
+        output=tmp_output if not conf.will_split else None,
+    )
+    finalize(tmp_output, output)
+    return f"Mined {output}"
+
+
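The steps dict above registers every available transformer, but only the names listed in conf.pipeline run, in that order; entries left as None (for example keep_lang when neither a blacklist nor a whitelist is set) are dropped by filter(None, ...). A reduced sketch of that selection logic, with placeholder strings standing in for the real jsonql transformers:

    steps = {"dedup": "DuplicatesRemover", "lid": "Classifier", "keep_lang": None, "sp": "MultiSentencePiece"}
    pipeline = list(filter(None, (steps[s] for s in ["dedup", "lid", "keep_lang", "sp"])))
    assert pipeline == ["DuplicatesRemover", "Classifier", "MultiSentencePiece"]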
+def regroup(conf: Config, all_dirs: List[Path]) -> Path:
+    """Reshards each language/quality after 'mine'."""
+    regroup_dir = conf.get_mined_dir(regroup=True)
+    assert all_dirs
+    all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
+    if not all_files:
+        print(f"No .json.gz file found in {all_dirs[0]}")
+
+    splits: Dict[str, List[Path]] = defaultdict(list)
+    for f in all_files:
+        split = f.name.split(".")[0]
+        splits[split].append(f)
+
+    print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
+    inputs: List[List[Path]] = []
+    outputs: List[Path] = []
+    target_size = jsonql.parse_size(conf.target_size)
+    for split, files in splits.items():
+        cuts = list(regroup_module.determine_groups(files, target_size=target_size))
+        if not cuts:
+            continue
+
+        pattern = f"{split}_????.json.gz"
+        existing_outputs = sorted(regroup_dir.glob(pattern))
+
+        if not conf.cleanup_after_regroup:
+            # We still have all the inputs so it is safe to overwrite existing outputs.
+            assert len(existing_outputs) <= len(cuts)
+            existing_outputs = []
+
+        if len(existing_outputs) > 0 and len(cuts) == 1:
+            # append to existing file if size allows it.
+            new_size = (
+                sum(f.stat().st_size for f in cuts[0])
+                + existing_outputs[-1].stat().st_size
+            )
+            if new_size < target_size:
+                print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
+                cuts[0].insert(0, existing_outputs.pop(-1))
+
+        n_existing = len(existing_outputs)
+        for i, cut in enumerate(cuts):
+            # avoid overwriting existing files.
+            j = i + n_existing
+            output = regroup_dir / f"{split}_{j:04}.json.gz"
+            inputs.append(cut)
+            outputs.append(output)
+        print(
+            str(regroup_dir / pattern),
+            "->",
+            len(cuts),
+            f"shards ({n_existing} already there).",
+        )
+
+    ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
+    ex(_regroup, repeat(conf), inputs, outputs)
+
+    return regroup_dir
+
+
+def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
+    output.parent.mkdir(parents=True, exist_ok=True)
+    regroup_module.fast_reshard(
+        inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
+    )
+    return f"Regrouped {output}"
+
+
+def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
+    """Moves segment files into the regrouped dir, leaving symlinks behind."""
+    # check that mining is over.
+    regroup_dir = conf.get_mined_dir(regroup=True)
+    assert all_dirs, "Received no dirs to move"
+    assert all(
+        d.is_dir() for d in all_dirs
+    ), f"move_segments expected dirs but received files: {all_dirs[:10]}..."
+
+    regroup_dir.parent.mkdir(exist_ok=True)
+    regroup_dir.mkdir(exist_ok=True)
+    ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
+
+    def _move_segments(subdir: Path, regroup_dir: Path) -> str:
+        n = 0
+        for f in subdir.iterdir():
+            if not f.is_file() or f.is_symlink():
+                continue
+            n += f.name.endswith(".json.gz")
+            new_name = regroup_dir / f.name
+            target = new_name.resolve()
+            assert f.resolve() != target
+            # this makes the job idempotent.
+            f.rename(new_name)
+            f.symlink_to(target)
+
+        if n == 0:
+            return ""
+
+        return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
+
+    ex(_move_segments, all_dirs, repeat(regroup_dir))
+    print(f"Results are in {regroup_dir}")
+    return regroup_dir
+
+
+def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
+    stats: Dict[str, dict] = {}
+    for file in sorted(output_dir.glob("*.json.gz")):
+        fname = "/".join((file.parent.name, file.name))
+        # The order of documents is not guaranteed inside a shard.
+        lines = sorted(jsonql.open_read(file))
+        content = "\n".join(lines)
+        size = len(content)
+        checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
+        # first_document = json.loads(lines[0])
+        stats[fname] = {"size": size, "checksum": checksum}
+
+    def dump(x):
+        return json.dumps(x, indent=2, ensure_ascii=False)
+
+    print("*** Stats ***")
+    stats_raw = dump(stats)
+    stats_file = FILE_DIR / "data" / "test_stats.json"
+    if generate:
+        print("Saving stats to", stats_file)
+        stats_file.write_text(stats_raw)
+        return
+
+    expected_stats: Dict[str, dict] = {}
+    if stats_file.exists():
+        expected_stats = json.loads(stats_file.read_text())
+
+    if expected_stats == stats:
+        print("Everything looks good!")
+        return
+
+    stats_file.with_suffix(".actual.json").write_text(stats_raw)
+    print("*** Expected Stats ***")
+    print(dump(expected_stats))
+
+    print("*** Diff ***")
+    for fname in sorted(expected_stats.keys()):
+        print(fname)
+        assert fname in stats, "missing file " + fname
+        if expected_stats[fname]["size"] != stats[fname]["size"]:
+            print(
+                " - Expected size",
+                expected_stats[fname]["size"],
+                ", size",
+                stats[fname]["size"],
+            )
+        if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
+            print(
+                " - Expected checksum",
+                expected_stats[fname]["checksum"],
+                ", checksum",
+                stats[fname]["checksum"],
+            )
+
+
+def get_main_parser() -> ArgumentParser:
+    # Generates the 'main' parser by patching a 'Config' parser
+    p = func_argparse.func_argparser(Config)
+
+    # Override default values to None, so we know what was set by the user.
+    # Note that it will keep the original default values in the help message.
+    p.set_defaults(**{f: None for f in Config._fields})
+    p.add_argument("--config", type=str, default="base")
+    p.set_defaults(__command=main)
+    return p
+
+
+def main(config: str = "base", **config_as_dict: Any) -> None:
+    # Use the given 'config' as default value.
+    config_base = config
+    if config_base in PREDEF_CONFIGS:
+        conf = PREDEF_CONFIGS[config_base]
+    elif Path(config_base).exists():
+        conf = Config.from_json(Path(config_base))
+    else:
+        raise ValueError(
+            f"Invalid value {config_base} for --config. "
+            f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
+        )
+    conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
+
+    print("Will run cc_net.mine.main with the following config:", conf)
+
+    all_files = mine(conf)
+    if conf.will_split:
+        assert all_files
+        assert all(d.is_dir() for d in all_files)
+        all_dirs = all_files
+        if "split_by_lang" in conf.pipeline:
+            # Only try regrouping if we split the shards.
+            regroup(conf, all_dirs)
+        elif "split_by_segment" in conf.pipeline:
+            # If we split by segment then regrouping is trivial,
+            # since a segment appears in only one shard.
+            move_segments(conf, all_dirs)
+
+    if conf.config_name == "test":
+        _validate_test(conf, conf.get_mined_dir(regroup=True))
+
+
+if __name__ == "__main__":
+    func_argparse.parse_and_call(get_main_parser())
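get_main_parser() exposes every Config field as a flag and dispatches to main(), so the same run can also be driven directly from Python. A minimal sketch, assuming the cc_net package is importable and that the chosen overrides are valid Config fields (the values here are illustrative):

    from cc_net import mine

    # Equivalent in spirit to passing --config test plus one override on the
    # command line: only non-None keyword arguments replace config fields.
    mine.main(config="test", num_shards=2)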