diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/__init__.py b/cc-multilingual-main/cc_net/build/lib/cc_net/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..602d26857d421a1161d92b669da1739f292f6c96 --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/__main__.py b/cc-multilingual-main/cc_net/build/lib/cc_net/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..23508400a7c2e09c54e53b6ab00ed857a3555042 --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/__main__.py @@ -0,0 +1,18 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + + +import func_argparse + +import cc_net.mine + + +def main(): + func_argparse.parse_and_call(cc_net.mine.get_main_parser()) + + +if __name__ == "__main__": + main() diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/dedup.py b/cc-multilingual-main/cc_net/build/lib/cc_net/dedup.py new file mode 100644 index 0000000000000000000000000000000000000000..fe0d4275793e2ad6602c60ab22e2381054a7a6aa --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/dedup.py @@ -0,0 +1,478 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Tools to remove duplicate paragraphs across one or several shards. +""" + +import argparse +import gc +import hashlib +import logging +import multiprocessing +import os +import tempfile +import time +from pathlib import Path +from typing import Iterable, List, Optional, Set, Union + +import numpy as np + +from cc_net import jsonql +from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet +from cc_net.jsonql import mem_footprint_gb +from cc_net.text_normalizer import normalize_for_dedup + +BYTE_ORDER = "little" +HASH_SIZE = HASH_TYPE(0).nbytes +DISABLE_MULTI_PROCESSING = False + +FilesOrDir = Union[List[Path], Path] + + +def get_args(): + parser = argparse.ArgumentParser( + description="Read a set of json files and allow to query them", + parents=[jsonql.io_parser()], + ) + + parser.add_argument("--field", type=str, default="raw_content") + parser.add_argument("--output_hashes", type=str) + parser.add_argument("--no_finalize", action="store_false", dest="finalize") + # parser.add_argument("--mem_gb", type=int) + parser.add_argument("--hashes", type=str) + + return vars(parser.parse_args()) + + +def _b2i(b: bytes) -> int: + return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0) + + +def str_hash(s: str) -> int: + h = hashlib.sha1(bytes(s, encoding="utf-8")) + return _b2i(h.digest()) + + +log = logging.getLogger(__name__).info + + +def run_par(processes): + # This is different from multiprocessing.map since it allows for kwargs. 
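+    # Illustrative usage: `processes` is an iterable of (function, args, kwargs)
+    # tuples, which lets each job receive its own keyword arguments, e.g. (as done
+    # further down in this file):
+    #
+    #   run_par(
+    #       (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
+    #       for f, o in zip(files, outputs)
+    #   )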
+ processes = list(processes) + if len(processes) == 1 or DISABLE_MULTI_PROCESSING: + for f, args, kwargs in processes: + f(*args, **kwargs) + return + + log(f"Starting {len(processes)} subprocess") + processes = [ + multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes + ] + for p in processes: + p.start() + for p in processes: + p.join() + failed = 0 + for p in processes: + if p.exitcode != 0: + log(f"Process failed with code {p.exitcode}: {p}") + failed += 1 + assert failed == 0, f"{failed} processes failed..." + + +def split_file(file, n_splits): + for i in range(n_splits): + yield jsonql.SplitFile(file, i, n_splits) + + +def merge(hashes_1, hashes_2, output): + if isinstance(hashes_1, str): + h1 = FlatHashSet() + h1.load(hashes_1) + else: + h1 = hashes_1 + + if isinstance(hashes_2, str): + h2 = FlatHashSet() + h2.load(hashes_2) + else: + h2 = hashes_2 + + h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2)) + dup = h1.__contains__(h2_np) + + # Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to + # h1 with their value. + h1[h2_np] = dup + if output: + h1.dump(output) + return h1 + + +def merge_shard(hash_files, output): + h = FlatHashSet() + h.load(hash_files[0]) + for hash_file in hash_files[1:]: + h = merge(h, hash_file, output=None) + print(f"Merged {hash_file}. We now have {len(h)} hashes.") + + h.dump(output) + print(f"Saved {len(h)} hashes to {output}.") + + +def _dump_sentence_hashes(source: Path, output: Path, field: str): + treated = 0 + started = time.time() + with open(output, "wb") as o: + for doc in jsonql.read_jsons(source): + content = doc.get(field) + if not content: + continue + h = compute_hashes(content) + if h is None: + continue + h.tofile(o) + treated += 1 + if treated % 100_000 == 0: + delay = time.time() - started + log( + f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)" + ) + + +def _remove_duplicate_hashes(duplicates, source, output): + batch_size = 100_000 + n_lines, n_lines_kept = 0, 0 + with open(source, "rb") as f, open(output, "wb") as o: + log(f"Opening {source} with mode rb") + log(f"Opening {output} with mode wb") + while True: + hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size) + if hashes.size == 0: + break + + keep = duplicates[hashes] < 1 + kept = keep.sum() + hashes *= keep + hashes.tofile(o) + + n_lines += hashes.size + n_lines_kept += kept + + removed = n_lines - n_lines_kept + selectivity = n_lines_kept / n_lines if n_lines else 0 + log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}") + + +def remove_duplicates_sharded( + files: List[Path], + outputs: List[Path], + hashes_dir: FilesOrDir, + field: str, + group_hashes: int = 1, + tmp_dir: Path = None, + min_len: int = 0, +): + """Remove duplicates in several passes, when all hashes don't fit in RAM. + + Note: The current implementation is not doing a 'perfect' deduplication. + If a hash appear exactly once in each shard of hashes it won't be detected + as a duplicate. This can be fixed if hashes are fully dedup beforehand. 
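+
+    Illustrative call (paths and values below are placeholders):
+        remove_duplicates_sharded(
+            files=[Path("shard_0.json.gz"), Path("shard_1.json.gz")],
+            outputs=[Path("shard_0.dedup.json.gz"), Path("shard_1.dedup.json.gz")],
+            hashes_dir=Path("hashes/"),  # directory containing *.bin hash dumps
+            field="raw_content",
+            group_hashes=4,  # load up to 4 hash shards per pass
+        )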
+ """ + assert len(files) == len(outputs) + + if isinstance(hashes_dir, list): + hashes_files = hashes_dir + else: + hashes_files = sorted( + h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin" + ) + + assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}" + + if len(hashes_files) <= group_hashes: + log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}") + rm_dups = DuplicatesRemover(field, hashes_files) + rm_dups._prepare() + run_par( + (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o)) + for f, o in zip(files, outputs) + ) + return + + log(f"Starting deduplicate_sharded on {files}.") + tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None) + + def tmp_files(i): + return [ + Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin") + for f in files + ] + + last = tmp_files(0) + run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last)) + + if isinstance(hashes_dir, list): + hashes_files = hashes_dir + else: + hashes_files = sorted( + h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin" + ) + for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)): + hashes = FlatHashSet() + for h in group: + hashes.load(h) + log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)") + + intermediates = tmp_files(i + 1) + # Remove hashes in parallel. Since modern OS have "copy-on-write" and + # `hashes` is read-only, we will only have one version of it in RAM. + run_par( + (_remove_duplicate_hashes, (hashes, f, tmp), {}) + for f, tmp in zip(last, intermediates) + ) + # Force hashes to be freed, before we start allocating a new one. + del hashes + gc.collect() + + for tmp in last: + os.remove(tmp) + last = intermediates + + def finalize(source, dedup_hashes, min_len): + n_chars, n_chars_kept = 0, 0 + with open(dedup_hashes, "rb") as hashes: + for doc in jsonql.read_jsons(source): + content = doc.get(field) + if not content or len(content) < min_len: + continue + sentences = content.split("\n") + doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences)) + chars, kept_chars = finalize_doc(doc, field, doc_hashes) + n_chars += chars + n_chars_kept += kept_chars + yield doc + selectivity = n_chars_kept / n_chars if n_chars else 0 + log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).") + + dedup_hashes = last + run_par( + [ + ( + jsonql.run_pipe, + (finalize,), + dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o), + ) + for h, f, o in zip(dedup_hashes, files, outputs) + ] + ) + + tmp_directory.cleanup() + + +def compute_hashes(content) -> Optional[np.ndarray]: + if not content: + return None + lines = content.split("\n") + # save hashes as bytes but reinterpret them as uint64. 
+ hashes = np.fromiter( + ( + hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[ + :HASH_SIZE + ] + for l in lines + ), + dtype=np.dtype((bytes, HASH_SIZE)), + count=len(lines), + ) + return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape) + + +def finalize_doc(doc, field, hashes=None): + content = doc.get(field) + lines = content.split("\n") + n_chars = len(content) + if "original_nlines" not in doc: + doc["original_nlines"] = doc.get("nlines", len(lines)) + if "original_length" not in doc: + doc["original_length"] = doc.get("length", n_chars) + if hashes is None: + hashes = doc.pop(field + "_hash") + + # Remove duplicates inside doc + seen: Set[int] = set() + original_line_ids = doc.get("line_ids", range(len(hashes))) + line_ids = [] + new_lines = [] + for l, line, h in zip(original_line_ids, lines, hashes): + if h not in seen and h != 0: + line_ids.append(l) + new_lines.append(line) + seen.add(h) + + doc[field] = "\n".join(new_lines) + doc["nlines"] = len(line_ids) + n_chars_kept = len(doc[field]) + doc["length"] = n_chars_kept + doc["line_ids"] = line_ids + return n_chars, n_chars_kept + + +class HashesCollector(jsonql.Transformer): + """ + Collect all hashes found of lines found in the `field` of the source documents. + """ + + parallelisable = False + + def __init__( + self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None + ): + super().__init__() + self.n_lines = 0 + self.field = field + self.output = output + self.hashes = FlatHashSet() if hashes is None else hashes + self.num_hashes_end = 0 + self.num_hashes_start = len(self.hashes) + + def summary(self) -> List[str]: + summ = super().summary() + h = self.num_hashes_end if self.hashes is None else len(self.hashes) + h = (h - self.num_hashes_start) // 1000 + max_mem = mem_footprint_gb() + n = self.n_lines // 1000 + summ.append( + f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM." + ) + return summ + + def do(self, doc: dict) -> None: + doc_hashes = compute_hashes(doc.get(self.field)) + if doc_hashes is None: + return + self.hashes.add(doc_hashes) + self.n_lines += doc_hashes.size + + def close(self): + if self.output and self.hashes: + self.hashes.dump(self.output) + self.log(f"Saved {len(self.hashes)} hashes to {self.output}") + # Save the number of hashes. + self.num_hashes_end = len(self.hashes) + # Free up mem even if the transformer is kept somewhere else. + self.hashes = None # type: ignore + + +class DuplicatesRemover(jsonql.Transformer): + """DuplicatesRemover""" + + # The hashes can't be pickled so they will have to be read back from disk. + warn_when_pickling = True + + def __init__(self, field: str, hashes_files: List[Path], collect: bool = False): + """ + Remove duplicates + """ + super().__init__() + self.field = field + self.collect = collect + + self.hashes_files = hashes_files + self.duplicates: Optional[AbstractDedupHashSet] = None + + self.n_lines, self.n_lines_kept = 0, 0 + self.n_chars, self.n_chars_kept = 0, 0 + + def _prepare(self): + if self.duplicates is not None: + return + self.duplicates = FlatHashSet() + + start = time.time() + for h in self.hashes_files: + shard_start = time.time() + self.duplicates.load(str(h)) + delay = time.time() - shard_start + self.log( + f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)" + ) + + delay = time.time() - start + self.log( + f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. 
({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)" + ) + + def do(self, doc: dict) -> Optional[dict]: + content = doc.get(self.field) + if not content: + return None + doc_hashes = compute_hashes(content) + + assert self.duplicates is not None + seen = ( + self.duplicates.add(doc_hashes) + if self.collect + else self.duplicates[doc_hashes] + ) + keep = seen < True + kept = keep.sum() + if kept == 0: + return None + doc_hashes = doc_hashes * keep + self.n_lines += keep.size + self.n_lines_kept += kept + chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes) + self.n_chars += chars + self.n_chars_kept += kept_chars + return doc + + def summary(self) -> List[str]: + summ = super().summary() + end_time = time.time() + n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed + speed = n_docs / (end_time - self.start_time) + summ.append( + f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]" + ) + selectivity = self.n_lines_kept / self.n_lines if n_lines else 0 + summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).") + + n_chars_kept, n_chars = self.n_chars_kept, self.n_chars + selectivity = n_chars_kept / n_chars if n_chars else 0 + summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).") + return summ + + +def deduplicate( + file: jsonql.ReadableFileLike, field: str = "raw_content" +) -> Iterable[dict]: + """Remove duplicates of the given file (but keep the first occurence).""" + dup_remover = DuplicatesRemover(field, [], collect=True) + return dup_remover.map(jsonql.read_jsons(file)) + + +def deduplicate_two_pass( + file: jsonql.FileDescriptor, field: str = "raw_content" +) -> Iterable[dict]: + """Remove duplicates of the given file (even removing the first occurence). + + This is what is done in the paper, and in mine.py + """ + try: + if isinstance(file, Path): + hash_file: Path = file.with_suffix(".bin") + else: + hash_file = jsonql._tmp(Path("hashes.bin")) + jsonql.run_pipes( + jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file + ) + dup_remover = DuplicatesRemover(field, [hash_file]) + return dup_remover.map(jsonql.read_jsons(file)) + finally: + if hash_file.exists(): + hash_file.unlink() diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/execution.py b/cc-multilingual-main/cc_net/build/lib/cc_net/execution.py new file mode 100644 index 0000000000000000000000000000000000000000..874ca145006b3ae86e86c48225e1fe0a30d12236 --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/execution.py @@ -0,0 +1,248 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import functools +import itertools +import logging +import os +import sys +import time +import warnings +from pathlib import Path +from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized + +import submitit +from typing_extensions import Protocol +# import pdb +from concurrent.futures import ThreadPoolExecutor + + +class Executor(Protocol): + def __call__(self, function: Callable[..., str], *args: Iterable) -> None: + ... 
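+# Illustrative use of the Executor protocol (function and file names below are
+# placeholders): get_executor() returns such a callable, which maps a function
+# over per-job argument iterables, e.g.
+#
+#   executor = get_executor("mine", Path("logs"), "local", timeout_hour=2.0, cpus=4)
+#   executor(process_shard, shard_files, output_files)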
+ + +class SubmititRetryOnTimeout(submitit.helpers.Checkpointable): + def __init__(self, fn: Callable): + self.fn = fn + self.__name__ = fn.__name__ + + def __call__(self, *args, **kwargs): + return self.fn(*args, **kwargs) + + +def get_executor( + name: str, + log_dir: Path, + execution: str, + timeout_hour: float = 1.0, + mem_gb: int = 1, + cpus: int = 1, + task_parallelism: int = -1, + options: dict = {}, +) -> Executor: + + execution_mode = execution.split(",")[0] + options.update( + {kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]} + ) + + if execution_mode == "mp": + warnings.warn("Execution mode 'mp' is deprecated, use 'local'.") + execution_mode = "local" + + cluster = None if execution_mode == "auto" else execution_mode + # use submitit to detect which executor is available + ex = submitit.AutoExecutor(log_dir, cluster=cluster) + ex.parameters['timeout_min'] = int(timeout_hour * 60) + + if ex.cluster == "local": + # LocalExecutor doesn't respect task_parallelism + return functools.partial(custom_map_array, ex, task_parallelism) + if ex.cluster == "debug": + return debug_executor + # pdb.set_trace() + # We are on slurm + if task_parallelism == -1: + task_parallelism = 500 + + ex.update_parameters( + name=name, + timeout_min=int(timeout_hour * 60), + mem_gb=mem_gb, + cpus_per_task=cpus, + slurm_array_parallelism=task_parallelism, + **options, + ) + return functools.partial(map_array_and_wait, ex) + + +def map_array_and_wait( + ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable +): + f_name = function.__name__ + + assert len(args) > 0, f"No arguments passed to {f_name}" + approx_length = _approx_length(*args) + + print(f"Submitting {f_name} in a job array ({approx_length} jobs)") + jobs = ex.map_array(function, *args) + if not jobs: + return + failed_jobs = [] + done = 0 + total = len(jobs) + job_array_id = jobs[0].job_id.split("_")[0] + # pdb.set_trace() + print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).") + for job in submitit.helpers.as_completed(jobs): + done += 1 + e = job.exception() + if not e: + print(f"Finished job {job.job_id} ({done} / {total}).", job.result()) + continue + + print(f"Failed job {job.job_id} ({done} / {total}):", e) + failed_jobs.append(job) + + if failed_jobs: + n_failures = 10 + message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}" + print(message) + for job in failed_jobs[:n_failures]: + print(f"Failed {job.job_id} -> {job.paths.stderr}") + if len(failed_jobs) > n_failures: + print(f"... 
({len(failed_jobs) - n_failures} failed job skipped)") + raise Exception(message) + + +def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None: + logging.getLogger().setLevel(logging.DEBUG) + approx_length = _approx_length(*args) + for i, x in enumerate(zip(*args)): + try: + message = function(*x) + except Exception: + exit(1) + try: + import ipdb as pdb # type: ignore + except ImportError: + import pdb # type: ignore + import traceback + + traceback.print_exc() + print("") + pdb.post_mortem() + sys.exit(1) + if message is not None: + print(message, f"({i + 1} / {approx_length})") + +# def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None: +# logging.getLogger().setLevel(logging.DEBUG) +# approx_length = _approx_length(*args) +# with ThreadPoolExecutor(max_workers=4) as executor: +# futures = [] +# for i, x in enumerate(zip(*args)): +# future = executor.submit(_execute_function, function, x, i + 1, approx_length) +# futures.append(future) +# for future in futures: +# future.result() + +# def _execute_function(function: Callable[..., Optional[str]], args: tuple, index: int, total: int): +# try: +# message = function(*args) +# if message is not None: +# print(message, f"({index} / {total})") +# except Exception: +# # traceback.print_exc() +# sys.exit(1) + +def _approx_length(*args: Iterable): + for a in args: + if isinstance(a, Sized): + return len(a) + return -1 + + +def custom_map_array( + ex: submitit.AutoExecutor, + parallelism: int, + function: Callable[..., Optional[str]], + *args: Iterable, +) -> None: + f_name = function.__name__ + assert len(args) > 0, f"No arguments passed to {f_name}" + + jobs_args = list(zip(*args)) + total = len(jobs_args) + if parallelism < 0: + parallelism = os.cpu_count() or 0 + assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}" + print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}") + enqueued = 0 + done = 0 + running_jobs: List[submitit.Job] = [] + failed_jobs: List[submitit.Job] = [] + + while done < len(jobs_args): + # Try to queue more job if we have some bandwidth. + if enqueued < total and len(running_jobs) < parallelism: + running_jobs.append(ex.submit(function, *jobs_args[enqueued])) + enqueued += 1 + continue + + # Else wait for some job to finish + if not running_jobs: + warnings.warn( + f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}" + ) + break + + job = get_next_job(running_jobs) + running_jobs.remove(job) + done += 1 + e = job.exception() + if not e: + print(f"Finished job {job.job_id} ({done} / {total}).", job.result()) + continue + + print(f"Failed job {job.job_id} ({done} / {total}):", e) + failed_jobs.append(job) + + if failed_jobs: + n_failures = 10 + message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}" + print(message) + for job in failed_jobs[:n_failures]: + print(f"Failed {job.job_id} -> {job.paths.stderr}") + if len(failed_jobs) > n_failures: + print(f"... ({len(failed_jobs) - n_failures} failed job skipped)") + raise Exception(message) + + +def get_next_job( + jobs: Sequence[submitit.Job], poll_frequency: float = 10 +) -> submitit.Job: + """ + Waits for any of the job to finish and returns it. 
+ + jobs: list of jobs + poll_frequency: frequency in second at which we check job status + """ + start = time.time() + waiting = False + while True: + for job in jobs: + if job.done(): + return job + if not waiting: + job_ids = [j.job_id for j in jobs[:4]] + suffix = "..." if len(jobs) > 4 else "" + print( + f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}" + ) + waiting = True + time.sleep(poll_frequency) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/flat_hash_set.py b/cc-multilingual-main/cc_net/build/lib/cc_net/flat_hash_set.py new file mode 100644 index 0000000000000000000000000000000000000000..f7529fe9b68d5251d23b5ace495524fda75c1b7b --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/flat_hash_set.py @@ -0,0 +1,247 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import sys +import time +import warnings +from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type + +import numpy as np + +HASH_TYPE: Type[np.uint64] = np.uint64 + +GETPY_WARNING = False + + +class AbstractDedupHashSet(Sized, Iterable[np.uint64]): + """A dict-like that returns `True` for keys that have been added more than once. + + The API is batched and expect np.array as input. This batching grants better + perf when using the C++ implementation. + """ + + dtype: Type[np.uint64] = HASH_TYPE + + def __repr__(self): + implementation = type(self).__name__ + return f"[{implementation}, len: {len(self)}" + + def __len__(self) -> int: + ... + + def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray: + ... + + def __getitem__(self, values) -> np.ndarray: + ... + + def __setitem__(self, keys, values) -> None: + ... + + def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]: + ... + + def keys(self) -> Iterable[np.uint64]: + ... + + def __iter__(self) -> Iterator[np.uint64]: + return iter(self.keys()) + + def add(self, h, contains=None): + """Add the given keys. First time a key is added the value is set to 0, + then it's set to one.""" + if not isinstance(h, np.ndarray): + h = np.array(h, dtype=HASH_TYPE) + if contains is None: + contains = self.__contains__(h) + + self.__setitem__(h, contains) + return contains + + def merge(self, keys, values): + contains = self.__contains__(keys) + self.__setitem__(keys, contains | values) + + def dump(self, filename): + return self.dump_np(filename) + + def load(self, filename): + return self.load_np(filename) + + def dump_np(self, filename): + kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)]) + items = np.fromiter(self.items(), dtype=kv_type, count=len(self)) + with open(filename, "wb") as f: + np.save(f, items) + + def load_np(self, filename): + items = np.load(str(filename)) + keys = items["k"].copy() + values = items["v"].copy() + self.merge(keys, values) + + def dump_np2(self, filename): + keys = np.fromiter( + (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self) + ) + with open(filename, "wb") as f: + np.save(f, keys) + + values = np.fromiter( + (v for (k, v) in self.items()), dtype=np.uint8, count=len(self) + ) + with open(str(filename) + ".val", "wb") as f: + np.save(f, values) + + def load_np2(self, filename): + keys = np.load(filename) + values = np.load(str(filename) + ".val") + self.merge(keys, values) + + +class NaiveHashSet(dict, AbstractDedupHashSet): + """Pure python implementation of AbstractDedupHashSet. 
+ + This implementation is quite fast, since Python dict are heavily optimized. + """ + + def __init__(self, iterable=None): + super().__init__() + global GETPY_WARNING + if GETPY_WARNING: + warnings.warn( + "Module 'getpy' not found. Deduplication will take more RAM." + " Try `pip install cc_net[getpy]" + ) + GETPY_WARNING = False + + def __contains__(self, values): + """Returns `True` if the object has been added at list once.""" + contains_point = super().__contains__ + return np.fromiter( + map(contains_point, values), count=len(values), dtype=np.uint8 + ) + + def __getitem__(self, values): + """Returns `True` if the object has been added at list twice.""" + get_point = super().get + return np.fromiter( + map(lambda x: get_point(x, False), values), + count=len(values), + dtype=np.uint8, + ) + + def __setitem__(self, keys, values): + assert len(keys) == len(values) + for k, v in zip(keys, values): + dict.__setitem__(self, k, v) + + +try: + import getpy as gp # type: ignore + + class _FlatHashSet(gp.Dict, AbstractDedupHashSet): + """C++ backed implementation of AbstractDedupHashSet. + + This implementation is slightly slower than the Python one but uses + 3x less RAM. + See https://github.com/atom-moyer/getpy. + """ + + def __init__(self): + super().__init__(HASH_TYPE, np.uint8, default_value=False) + + def __contains__(self, h): + """Returns `True` if the object has been added at list once.""" + if not isinstance(h, np.ndarray): + h = np.array(h, dtype=HASH_TYPE) + c = gp.Dict.__contains__(self, h) + c.dtype = np.uint8 + return c + + def dump(self, filename): + return self.dump_gp(filename) + + def load(self, filename): + return self.load_gp(filename) + + def dump_gp(self, filename): + return gp.Dict.dump(self, str(filename)) + + def load_gp(self, filename): + """Override gp.Dict.load, to correctly merge values instead of overwriting.""" + other = gp.Dict(HASH_TYPE, np.uint8, default_value=False) + other.load(str(filename)) + n = len(other) + keys = np.fromiter( + (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n + ) + values = np.fromiter( + (v for (k, v) in other.items()), dtype=np.uint8, count=n + ) + self.merge(keys, values) + + FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet +except ImportError: + GETPY_WARNING = True + FlatHashSet = NaiveHashSet + + +def timeit(message, function, *args): + start = time.time() + function(*args) + end = time.time() + print(message, f"took {end - start:.0f}s") + + +def compare_load(*filenames): + assert filenames, "No file given" + + def load_list(): + hashes = [] + for f in filenames: + h = FlatHashSet() + h.load(f) + print(f"Loaded {h} from {f}.") + hashes.append(h) + return hashes + + def load_all(load, ext): + hashes = FlatHashSet() + for f in filenames: + load(hashes, f + ext) + + def dump_all(hashes, dump, ext): + for h, f in zip(hashes, filenames): + dump(h, f + ext) + + hashes = load_list() + dump_gp = getattr(FlatHashSet, "dump_gp") + if dump_gp is not None: + timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test") + timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test") + timeit( + "Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test" + ) + + load_gp = getattr(FlatHashSet, "load_gp") + if load_gp is not None: + timeit("Loading using gp.load", load_all, load_gp, ".gp.test") + timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test") + timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test") + + # Loading 10 shards: + # 
[dedup] Dumping using gp.dump took 52s + # [dedup] Dumping using dump_np took 270s + # [dedup] Dumping using dump_np2 took 483s + # + # [dedup] Loading using gp.load took 654s + # [dedup] Loading using load_np took 82s + # [dedup] Loading using load_np2 took 76s + + +if __name__ == "__main__": + compare_load(*sys.argv[1:]) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/get_wiki_cirrus.py b/cc-multilingual-main/cc_net/build/lib/cc_net/get_wiki_cirrus.py new file mode 100644 index 0000000000000000000000000000000000000000..a3e1d43ff8773a6e15a4e290cd37597178eea42d --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/get_wiki_cirrus.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Creates mono-lingual corpus from Wikipedia. +""" + +import functools +import re +import subprocess +import urllib.request +from pathlib import Path +from typing import Dict + +import func_argparse +from bs4 import BeautifulSoup # type: ignore + +from cc_net import jsonql, text_normalizer + +CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch" +CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz") + + +def tmp(file: Path) -> Path: + return file.parent / ("tmp." + file.name) + + +def opening(file: Path, output: Path = None, n_docs: int = 1_000_000): + """Will dump the tokenized opening text of the given Wikipedia. + + Args: + - file: File containing the Wikipedia dump. + - output: Output file. + - n_docs: How many docs to parse + - tokenize: whether to tokenize the text + - lang: Language code used to chose the tokenizer + """ + assert file.exists() + return jsonql.run_pipes( + functools.partial(extract_opening_text, n_docs=n_docs), + file=file, + output=tmp(output) if output else None, + ) + if output: + tmp(output).replace(output) + + +def extract_opening_text(source, n_docs: int = 10_000): + i = 0 + for doc in jsonql.read_jsons(source): + if not doc: + continue + + text = doc.get("opening_text") + if not text: + continue + + yield text_normalizer.normalize(text) + i += 1 + if i >= n_docs: + break + + +def dl(lang: str, output_dir: Path, date: str = None): + """Download the cirrus extract for the given lang. + + See https://dumps.wikimedia.org/other/cirrussearch for the full list of files. + + Args: + - lang: The Wikipedia code for the language. + - output_dir: Output directory. File will be `{lang}.json.gz` + - date: Date of a specific Cirrus dump. + """ + + urls = get_cirrus_urls(date) + assert ( + lang in urls + ), f"--lang {lang} not found. Available languages are: {urls.keys()}" + + assert output_dir, "--output_dir folder needed." + output_dir.mkdir(exist_ok=True) + output = output_dir / (lang + ".json.gz") + print(f"Downloading {lang} wiki from {urls[lang]} to {output}") + wget(urls[lang], output) + + +def get_cirrus_urls(date: str = None) -> Dict[str, str]: + if date is None: + cirrus_page = BeautifulSoup( + urllib.request.urlopen(CIRRUS_URL), features="html.parser" + ) + dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")] + dumps.remove("..") + dumps.remove("current") + # We take the oldest dump since the most recent might be incomplete. + # The page only link to the N latest dumps so the dump won't be too old. 
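+        # (dump folders are date-stamped names such as "20240101", so the
+        # lexicographic min() below also picks the oldest dump)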
+ date = min(dumps) + + cirrus_url = "/".join((CIRRUS_URL, date)) + print("Will use the Wikipedia dump from:", date, cirrus_url) + cirrus_page = BeautifulSoup( + urllib.request.urlopen(cirrus_url), features="html.parser" + ) + urls = {} + for link in cirrus_page.findAll("a"): + match = CIRRUS_DUMP_RE.match(link.get("href")) + if not match: + continue + + urls[match.group(1)] = "/".join([cirrus_url, link.get("href")]) + assert urls, f"No valid download urls found at {cirrus_url}" + return urls + + +def wget(url: str, output: Path): + subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True) + tmp(output).replace(output) + assert ( + output.stat().st_size > 10_000 + ), f"File {output} downloaded from {url} looks too small" + + +if __name__ == "__main__": + func_argparse.main(dl, opening) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/jsonql.py b/cc-multilingual-main/cc_net/build/lib/cc_net/jsonql.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ab405a2af88c56874adef04bd790859640421a --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/jsonql.py @@ -0,0 +1,1340 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Manipulate files containing one json per line. +""" +import argparse +import collections +import contextlib +import functools +import glob +import gzip +import importlib +import inspect +import io +import itertools +import json +import logging +import multiprocessing +import os +import re +import sys +import tempfile +import time +import typing as tp +import warnings +import zlib +from pathlib import Path +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + TextIO, + Tuple, + Union, +) + +import numpy as np +import psutil # type: ignore +import requests +from typing_extensions import Protocol + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s", + datefmt="%Y-%m-%d %H:%M", +) + +NEWLINE = " N3WL1N3 " + +FilterFn = Callable[[dict], bool] +FileDescriptor = Union[Path, List[Path], str] +WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None] +ReadableFileLike = Union[Iterable[str], FileDescriptor, None] + + +def io_parser(): + """Parser shared by all commands to get input/output files.""" + parser = argparse.ArgumentParser(add_help=False) + file_help = """File to read from. Can be specified several times for several files. + Be careful that bash will expand glob patterns **before** sending the args + to python. 
To use globs put it inside single quotes: + jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1 + jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1 + [Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1 + [Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1 + """ + parser.add_argument("-f", "--file", type=Path, action="append", help=file_help) + parser.add_argument("-o", "--output", type=Path, default="-") + parser.add_argument("--processes", type=int, default=1) + return parser + + +def get_parser(): + parser = argparse.ArgumentParser( + description="Read a set of json files and allow to query them" + ) + subparsers = parser.add_subparsers() + + def add_subparser(function, arguments): + doc = function.__doc__.split("\n")[0] + p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()]) + p.set_defaults(command=function) + for k, v in arguments.items(): + p.add_argument(k, **v) + + add_subparser( + select, + { + "columns": dict(nargs="+", help="Extract the value of the given fields"), + "--skip_empty": dict( + action="store_true", help="Skip lines without the requested fields" + ), + "--separator": dict( + default="\t", help="Separator to use between the different columns" + ), + "--newline": dict( + default=NEWLINE, + help="Replace newlines found in the text by the given string", + ), + }, + ) + + add_subparser( + where, + { + "clauses": dict(nargs="+", help=""), + "--requires": dict( + action="append", help="Python module required by the clauses code." + ), + }, + ) + + add_subparser( + merge, + { + "columns": dict(nargs="+", help=""), + "--separator": dict( + default="\t", help="Separator to use between the different columns" + ), + "--newline": dict( + default=NEWLINE, help="Replace the given string by actual newlines" + ), + }, + ) + + add_subparser( + describe, + { + "columns": dict(nargs="*", help=""), + "--bins": dict( + default="auto", help="Number of bins for computing the histograms" + ), + "--cumulative": dict( + action="store_true", help="Compute cumulative histograms" + ), + "--weights": dict(type=str, help="Column used to weight histograms"), + }, + ) + + add_subparser(split, {"--pattern": dict(type=str)}) + add_subparser(shard, {}) + return parser + + +def _split_array(array, sep): + last = 0 + for i, x in enumerate(array): + if x != sep: + continue + yield array[last:i] + last = i + 1 + if last != len(array): + yield array[last:] + + +def main(raw_args): + parser = get_parser() + pipeline = [] + file = "-" + output = "-" + processes = 1 + + for args_group in _split_array(raw_args, "--"): + args = vars(parser.parse_args(args_group)) + command = args.pop("command") + file = args.pop("file") or file + output = args.pop("output") or output + processes = args.pop("processes") or processes + pipeline.append(as_pipe(command, args)) + + if not pipeline: + parser.print_help() + return + + run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes) + + +class Transformer: + """ + Wrapper around functions transforming documents. + + This allows `run_pipes` to automatically parallelize the pipeline. + Provides: + * Automatic logging. Logging can be changed with the `summary` method. + Loggin frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable. + * Automatic parallelization without pickling. The transformers are shared + across processes, and the object is usually not pickled. + * Basic pickling / unpickling in case it's still needed. 
+ By default will only pickle the arguments passed to the constructor. + * Delayed initialization. Internal state which is not pickable should be set + inside the `_prepare` function. + """ + + parallelisable: bool = True + expect_json: bool = False + warn_when_pickling: bool = False + ready: bool = False + + def __init_subclass__(cls, expect_json: bool = None): + """Detects if the subclass expects json as input.""" + spec = inspect.getfullargspec(cls.do) + if expect_json is None: + expect_json = spec.annotations.get(spec.args[1], None) == dict + + cls.expect_json = expect_json + + def __new__(cls, *args, **kwargs): + """Creates the transformer and save the arguments passed to the constructor.""" + t = super().__new__(cls) + Transformer.__init__(t, args, kwargs) + return t + + def __init__(self, state_args: tuple = None, state_kwargs: dict = None): + """ + Init the transformer counters. + + If state_args/state_kwargs are set they will override whatever was + originally passed to the subclass constructor. + """ + if state_args is not None: + self.__args = state_args + if state_kwargs is not None: + self.__kwargs = state_kwargs + + self.start_time = time.time() + self.__last_log = self.start_time + self.processed = 0 + # Log every 5 min unless specified other wise. + self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60)) + self.__cls = type(self) + self._logger = logging.getLogger(self.__cls.__name__) + + def __call__(self, x): + assert self.ready, f"{self} is not ready." + if x is None: + return + y = self.do(x) + self.processed += 1 + if time.time() - self.__last_log > self._log_freq: + self.log_summary() + return y + + def do(self, x): + raise NotImplementedError(f"'do' not implemented in {type(self)}") + + def summary(self) -> List[str]: + return [self.speed_summary()] + + def speed_summary(self) -> str: + delay = time.time() - self.start_time + h = delay / 3600 + s = self.processed / delay + return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)." + + def log(self, message): + self._logger.info(message) + + def log_summary(self) -> None: + if not self.ready: + self.log("Not ready.") + return + summ = self.summary() or [] + for line in summ: + self.log(line) + self.__last_log = time.time() + + def map(self, source: Iterable) -> Iterator: + if self.ready: + for x in source: + yield self(x) + # since we have been prepared by caller, + # caller is also responsible for calling `close`. + return + else: + with self: + for x in source: + yield self(x) + + def __getstate__(self) -> Tuple[tuple, dict, bool]: + return (self.__args, self.__kwargs, self.expect_json) + + def __setstate__(self, state: Tuple[tuple, dict, bool]): + if self.warn_when_pickling: + warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.") + (args, kwargs, expect_json) = state + # When unpickling `__new__` isn't called so we have to doit ourselves. + Transformer.__init__(self, state_args=args, state_kwargs=kwargs) + type(self).__init__(self, *args, **kwargs) + assert self.expect_json == expect_json + # __setstate__ is called by multiprocessing right before calling + # the object so we need to initialize everything. + self.__enter__() + + def _prepare(self) -> None: + pass + + def __enter__(self) -> "Transformer": + # In multiprocessing __enter__ is always called twice, so we are idempotent. + # Because we call __enter__ when deserializing this transformer and + # also when the parent transformer is deserialized. 
+ self.start_time = time.time() + if self.ready: + return self + self._prepare() + self.ready = True + return self + + def __exit__(self, *args) -> None: + self.close() + self.log_summary() + + def close(self) -> None: + pass + + +def as_pipe(transformer, kwargs): + if isinstance(transformer, type): + return transformer(**kwargs) + return lambda source: transformer(source, **kwargs) + + +def compose(fns: List[Transformer]) -> Transformer: + if len(fns) == 1: + return fns[0] + return MultiTransformer(fns) + + +class MultiTransformer(Transformer): + def __init__(self, transformers: List[Transformer]): + super().__init__() + self.transformers = transformers + + def __repr__(self) -> str: + pipeline = " | ".join(type(t).__name__ for t in self.transformers) + return f"<{pipeline}>" + + def do(self, x): + for t in self.transformers: + x = t(x) + return x + + def _prepare(self): + for t in self.transformers: + t.__enter__() + return self + + def __exit__(self, *args): + for t in self.transformers: + t.__exit__(*args) + + def summary(self): + return itertools.chain(*(t.summary() for t in self.transformers)) + + +class Mapper(Transformer): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def do(self, x): + return self.fn(x) + + +def run_pipe( + command, + kwargs: dict = None, + file: ReadableFileLike = None, + output: WritableFileLike = None, +): + kwargs = kwargs or {} + if isinstance(kwargs, argparse.ArgumentParser): + kwargs = vars(kwargs.parse_args()) + file = file or Path(kwargs.pop("file", "-")) + output = output or Path(kwargs.pop("output", "-")) + + return run_pipes(as_pipe(command, kwargs), file=file, output=output) + + +def run_pipes( + *fns: Union[Transformer, Callable[[Iterable], Iterable]], + inputs: Iterable[dict] = None, + file: ReadableFileLike = None, + output: WritableFileLike = None, + processes: int = 1, + chunksize: int = 10_000, +): + """ + Run full document processing pipeline. + + - fns: list of functions to run over the documents. Can be: + * `Iterable -> Iterable` function + * jsonql.Transformer instance + Using transformers allow the pipeline to process documents in parallel. + - inputs: iterable to read the documents from + - file: if inputs is not given, will read documents from this file. + - output: writable file like. + - processes: number of processes to use. -1 means all CPU available. 
+ - chunksize: chunksize for multiprocessing.Pool.imap_unordered + """ + expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json + if expect_json and inputs is None: + fns = (JsonReader(),) + fns + transformers = [] + for t in fns: + if not isinstance(t, Transformer): + break + if not t.parallelisable: + break + transformers.append(t) + pipes = fns[len(transformers) :] + + log = logging.getLogger(__name__).info + if inputs is None: + data: Iterable = open_read(file) + else: + data = inputs + + if processes == -1: + processes = os.cpu_count() or 0 + + with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack: + if transformers: + log(f"preparing {transformers}") + transform = stack.enter_context(compose(transformers)) + if processes <= 1: + data = transform.map(data) + else: + p = multiprocessing.current_process() + log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}") + pool = stack.enter_context( + multiprocessing.Pool( + processes=processes, + initializer=_set_global_transformer, + initargs=(transform,), + ) + ) + data = pool.imap_unordered( + _global_transformer, data, chunksize=chunksize + ) + + for fn in pipes: + if isinstance(fn, Transformer): + data = fn.map(data) + else: + data = fn(data) + + write_jsons(data, output) + + +# Allows to share transformer acroos subprocess. +# Used by `run_pipes` +_GLOBAL_TRANSFORMER: Optional[Transformer] = None + + +def _set_global_transformer(transformer: Transformer): + global _GLOBAL_TRANSFORMER + p = multiprocessing.current_process() + logging.info( + f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}" + ) + assert transformer.ready, f"{transformer} isn't ready" + _GLOBAL_TRANSFORMER = transformer + + +def _global_transformer(document: str) -> Optional[dict]: + assert _GLOBAL_TRANSFORMER is not None + return _GLOBAL_TRANSFORMER(document) + + +def lines(file: ReadableFileLike) -> Iterator[str]: + return (line.strip("\n") for line in open_read(file)) + + +def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]: + reader = JsonReader(strict=strict) + lines = open_read(file) + for line in lines: + if line is None: + continue + yield reader(line) + + reader.log_summary() + + +def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None: + eol = os.linesep + with open_write(file) as o: + for res in source: + if res is None: + continue + if isinstance(res, dict): + json.dump(res, o, ensure_ascii=False) + o.write(eol) + continue + if isinstance(res, str): + res = res.rstrip("\n") + print(res, file=o) + + +class JsonReader(Transformer): + def __init__(self, strict: bool = False): + super().__init__() + self.ready = True + self.strict = strict + self.num_errors = 0 + + def do(self, line: str) -> Optional[dict]: + if line is None: + return None + if isinstance(line, dict): + return line + line = line.rstrip("\n") + if not line: + return None + try: + return json.loads(line) + except json.decoder.JSONDecodeError as e: + self.log_error(e) + if self.strict: + raise + return None + + def log_error(self, e: json.decoder.JSONDecodeError): + self.num_errors += 1 + if self.num_errors > 10: + return + + MAX_LEN = 80 + snippet, snippet_len = e.doc, len(e.doc) + col = e.pos + if snippet_len > MAX_LEN: + if col < MAX_LEN: + start = 0 + elif snippet_len - col < MAX_LEN: + start = snippet_len - MAX_LEN + else: + start = col - MAX_LEN // 2 + snippet = e.doc[start : start + MAX_LEN] + col = col - start + logging.warning( + "\n".join( + [ + f"Invalid json 
(length={len(e.doc)}) {e}", + snippet, + " " * (col - 1) + "^", + ] + ) + ) + + def summary(self): + summ = super().summary() + if self.num_errors > 0: + summ.append(f"Skipped {self.num_errors} invalid json.") + return summ + + +def compile_column(column, newline): + if callable(column): + return column + + if column == "*": + return json.dumps + + if re.match(r"[_a-z][_a-z0-9]*", column): + + def extract_col(doc): + v = doc.get(column, "") + if isinstance(v, str) and newline != "\n": + v = v.rstrip("\n").replace("\n", newline) + return v + + return extract_col + + return compile_expr(column) + + +def select(lines, columns, skip_empty=False, separator="\t", newline="\n"): + """Yields the content of the requested columns.""" + column_parsers = [compile_column(c, newline) for c in columns] + for doc in read_jsons(lines): + values = [] + empty = True + for parse_col in column_parsers: + v = parse_col(doc) + values.append(str(v) or "") + empty = empty and v is None + + if skip_empty and empty: + continue + + yield separator.join(values) + + +def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None): + if not isinstance(clause, str): + return clause + + args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})" + args_list = list(re.findall(args_re, clause)) + if not args_list: + # This is only a warning because you may want to have eg random sampling + # that doesn't depend on the document. + logging.warn( + f"Warning: No variable found in expression: <{clause}>\n" + "Variables should be written inside braces, eg: {language}=='en'" + ) + python_like = re.sub(args_re, r"doc.get('\1', None)", clause) + requires = requires or [] + modules = {r: importlib.import_module(r) for r in requires} + return eval(f"lambda doc: {python_like}", modules) + + +class where(Transformer): + """Filters the data using python code. + + Ex: `jsonql where 'len({text}) > 100'` + """ + + def __init__( + self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = [] + ): + super().__init__() + self.raw_clauses = clauses + self.requires = requires + self.n_selected = 0 + self.clauses: List[FilterFn] = [] + + def _prepare(self): + self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses] + + def do(self, doc: dict) -> Optional[dict]: + assert self.clauses + if not doc or not all((c(doc) for c in self.clauses)): + return None + self.n_selected += 1 + return doc + + def summary(self): + n_selected, n_docs = self.n_selected, self.processed + selectivity = n_selected / n_docs if n_docs else 0 + return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"] + + +def merge(lines, columns, separator="\t", newline=NEWLINE): + """Reads tab separated columns and output a json using the given headers. + + Headers are of form {key}[%{type}] + {type} can be one of {"f": float, "i": int, "b": bool, "s": string}. + Default type is string. + A special header "_" means interpret this column as json, and append all other + columns to it. Must appear only once and on last position. 
+ + Ex: + `echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}` + `echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}` + `echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}` + """ + handle_newlines = lambda s: s.replace(newline, "\n") + type_mapping: Dict[str, Callable] = { + "f": float, + "i": int, + "b": bool, + "s": handle_newlines, + } + type_parsing = [ + type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns + ] + columns = [f.split("%")[0] for f in columns] + doc_index = columns.index("_") if "_" in columns else -1 + read_json = JsonReader() + + def parse(line): + parts = line.split(separator, len(columns) - 1) + doc: Dict[str, tp.Any] = {} + for i, value in enumerate(parts): + if columns[i] == "_": + doc.update(read_json(parts[doc_index])) + else: + try: + doc[columns[i]] = type_parsing[i](value) + except ValueError: + logging.error( + f"Error when parsing column {i} of line: {line[:100]}..." + ) + return doc + + for line in lines: + yield json.dumps(parse(line)) + + +class split(Transformer): + """Split a files in several smaller files based on the value of a field.""" + + # Not parallelisable since we are writing to files. + parallelisable = False + + def __init__( + self, + pattern: Union[Path, str] = None, + split_fn: Callable[[dict], str] = None, + mkdir: bool = False, + ): + super().__init__() + assert not ( + pattern and split_fn + ), "split can't have both a pattern and a split_fn" + if split_fn is not None: + self.split_fn = split_fn + else: + assert pattern, "split need either a pattern or a split_fn" + self.split_fn = self.make_split_fn(str(pattern)) + self.mkdir = mkdir + self.o: dict = {} + + def make_split_fn(self, pattern: str) -> Callable[[dict], str]: + candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern)) + return lambda doc: pattern.format(**{c: doc[c] for c in candidates}) + + def do(self, doc): + filename = self.split_fn(doc) + if not filename: + return + o = self.o.get(filename, None) + if o is None: + if self.mkdir: + Path(filename).parent.mkdir(parents=True, exist_ok=True) + self.o[filename] = open_write(filename) + print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True) + + def summary(self): + summ = super().summary() + summ.append(f"Found {len(self.o)} splits.") + return summ + + def close(self): + for file in self.o.values(): + file.close() + + +def histogram(values, bins, weights): + hist, bins = np.histogram(values, bins=bins) + # n_bins = len(hist) + + if weights is not None: + # Bins can't be auto-determined if weights is supplied. + # So we first compute the bins without the weights then recompute + # the histogram with the weights. 
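+        # (np.histogram rejects string bin rules such as "auto" when weights are
+        # given, so the unweighted call above materializes the bin edges first.)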
+ hist, bins = np.histogram(values, bins=bins, weights=weights) + # cumsum = np.cumsum(hist) + # total = cumsum[-1] + + # for i in range(n_bins - 1): + # if cumsum[i] / total > 0.9: + # useful_range = np.linspace(bins[0], bins[i + 1], n_bins) + # new_bins = np.append(useful_range, [bins[-1]]) + # return np.histogram(values, bins=new_bins, weights=weights) + + return hist, bins + + +def _parse_bins(bins): + try: + if isinstance(bins, str): + if "," in bins: + bins = [int(b) for b in bins.split(",")] + else: + bins = int(bins) + except ValueError: + pass + return bins + + +ALL_DOCUMENTS = "" +MAX_LABEL_LEN = 100 + + +def bar_chart(hist, bins): + n = sum(hist) + max_h = max(hist) + out = [] + for i, h in enumerate(hist): + h_size = 80 * h // max_h + dh_size = 80 * (h - hist[i - 1]) // max_h + if h_size == 0 or dh_size == 0: + continue + bar = "█" * h_size + out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}") + out.append(f"{bins[-1]:8.3f}") + return out + + +def display_stats(stats, key, weights=None, bins="auto", cumulative=False): + out = [] + documents = stats[ALL_DOCUMENTS] + count = stats.get(key, 0) + r = count / documents if documents else 0 + out.append(f"Field {key} saw {count} times ({r:5.1%})") + + length = stats.get(key + ".length", None) + avg_length = length // count if length else 0 + if length is not None: + out[-1] += f", average length is {length // count}" + + values = stats.get(key + ".val", None) + if values: + out[-1] += f", histogram is: (bins={bins})" + if weights: + if weights not in stats: + logging.warn(f"Warning: weights column {weights} not found.") + if weights + ".val" not in stats: + logging.warn( + f"Warning: weights column {weights} is not a numeric column." + ) + weights = stats.get(weights + ".val") + hist, bins = histogram(values, _parse_bins(bins), weights) + if cumulative: + hist = np.cumsum(hist) + out += bar_chart(hist, bins) + + cnt = stats.get(key + ".cnt", None) + if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1: + cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True) + out[-1] += ", top 100 labels:" + for label, n in cnt[:100]: + if n < 5: + continue + out.append(f"{label:25}: {n:6} ({n / count:5.1%})") + + return out + + +def describe(source, columns=None, weights=None, **kwargs): + """Compute some statistics about a dataset. 
+ + Stats can be restricted to a subset of columns.""" + MAX_HIST_SIZE = 100_000_000 + MAX_CNT_SIZE = 1000 + stats = {ALL_DOCUMENTS: 0} + needed = columns + [weights] if columns else None + + for doc in read_jsons(source): + stats[ALL_DOCUMENTS] += 1 + for k, v in doc.items(): + if needed and k not in needed: + continue + stats[k] = get_or_set(stats, k, 0) + 1 + if isinstance(v, str): + stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v) + if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels + continue + cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int)) + if v in cnt or len(cnt) < MAX_CNT_SIZE: + cnt[v] += 1 + elif type(v) in (int, float): + values = get_or_set(stats, k + ".val", []) + if len(values) < MAX_HIST_SIZE: + values.append(v) + elif type(v) is list and len(v) and type(v[0]) in (int, float): + values = get_or_set(stats, k + ".val", []) + if len(values) < MAX_HIST_SIZE: + values += v + elif type(v) is dict: + cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int)) + for label in v: + if label in cnt or len(cnt) < MAX_CNT_SIZE: + cnt[label] += 1 + + documents = stats[ALL_DOCUMENTS] + yield f"Stats computed on {documents} documents:" + for k in stats: + if columns and k not in columns: + continue + if "." in k or k == ALL_DOCUMENTS: + continue + for line in display_stats(stats, k, weights=weights, **kwargs): + yield line + + +def shard(lines): + """Shard a file in several smaller ones.""" + # The creation of the shard is handle in a generic way. Do we need this ? + return lines + + +# *** Utils *** + + +def get_or_set(dictionary, key, default): + if key not in dictionary: + dictionary[key] = default + return dictionary[key] + + +class SimpleIO(Protocol): + """A subset of methods from TextIO.""" + + def close(self) -> None: + ... + + def write(self, line: str) -> int: + ... + + def __enter__(self) -> "SimpleIO": + ... + + def __exit__(self, exc_type, exc_value, traceback): + ... + + +def open_read(filename: ReadableFileLike) -> Iterable[str]: + """Open the given file, list of files or files matching the given glob and read lines. + + `filename` is None or "-" -> reads from stdin + `filename` is a Path / str -> interprets filename as a glob and open files matching it + `filename` is a list -> opens sequentially all files from the list using `open_read` + `filename` is something else -> returns the object wrapped in a `nullcontext` + This allows to pass already openened files or iterables. + + `open_read` will decompress gzip files, given they have ".gz" suffix. + """ + if filename is None: + return sys.stdin + + if isinstance(filename, list): + assert isinstance(filename[0], Path) + if len(filename) == 0: + return [] + if len(filename) > 1: + return _yield_from(filename) + filename = tp.cast(Path, filename[0]) + if isinstance(filename, str): + if filename.startswith("http://") or filename.startswith("https://"): + return open_remote_file(filename) + + filename = Path(filename) + if not isinstance(filename, Path): + # we might have received an iterable, return it unmodified. 
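+        # (e.g. an already-open file object or a generator of str lines)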
+ return filename # type: ignore + + # Expand glob patterns only when reading + files = [Path(f) for f in sorted(glob.glob(str(filename)))] + if len(files) > 1: + return _yield_from(files) + if len(files) == 1: + filename = files[0] + + assert isinstance(filename, Path) + + if filename.name.endswith("]"): + return block_reader(filename) + + logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'") + if filename.suffix == ".gz": + file: TextIO = gzip.open(filename, "rt") # type: ignore + else: + file = open(filename, "rt") + + return _close_when_exhausted(file) + + +def _close_when_exhausted(file: TextIO) -> Iterable[str]: + with file: + yield from file + + +def _yield_from(files: list) -> Iterable[str]: + for file in files: + yield from open_read(file) + + +def open_write( + filename: WritableFileLike, max_size: str = "4G" +) -> tp.ContextManager[TextIO]: + """Open the given file, list of files or files matching the given glob. + + The return value is a ContextManager meant to be used inside a `with` block: + ``` + with open_write("foo.txt") as o: + ... + + Write mode: + replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`. + If filename ends with ".gz", creates a blocked gzip file with random access. + """ + if filename is None: + return contextlib.nullcontext(sys.stdout) + + if isinstance(filename, list): + if len(filename) > 1: + return MultiFile(filename, "w", max_size) + else: + filename = tp.cast(Path, filename[0]) + if isinstance(filename, str): + filename = Path(filename) + if not isinstance(filename, Path): + assert hasattr(filename, "write"), f"{filename} doesn't have a .write method." + # We return a 'TextIO' even though we only check for `.write` method, + # this works better with eg `print`. + return contextlib.nullcontext(tp.cast(TextIO, filename)) + + mode = "wt" + if "?" in filename.name: + return sharded_file(filename, mode, max_size) + + logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}") + # TODO: should we use another format ? + if filename.suffix == ".gz": + return BlockedGzipWriter(Path(filename), mode, block_size="64M") + + return open(filename, "wt") + + +def parse_size(size): + unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3} + unit = size[-1].upper() + assert ( + unit in unit_map + ), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}." + return int(size[:-1]) * unit_map[unit] + + +class MultiFile(SimpleIO): + def __init__(self, files: Iterable[Path], mode="w", max_size="4G"): + self.name = str(files) + self.mode = mode + self.files = iter(files) + self.max_size = parse_size(max_size) + self.current_handle: Optional[TextIO] = None + self.current_block_size = 0 + self._open_next_handle() # Opening 1st handle allows to write directly. + + def write(self, content) -> int: + # Avoid splitting newlines to a new file. 
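+ # Note on the rollover check below: once `current_block_size` reaches
+ # `max_size` the writer switches to the next file from the `files` iterator,
+ # but never on a write that is just "\n", so a record and its trailing
+ # newline always land in the same output file.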
+ # use current_block_size since it's faster than `tell()` + if content != "\n" and self.current_block_size >= self.max_size: + self._open_next_handle() + if self.current_handle is None: + raise Exception("No more files to write to...") + + written = self.current_handle.write(content) + self.current_block_size += written + return written + + def _open_next_handle(self) -> bool: + self.close() + file = next(self.files, None) + if file is None: + return False + + self.current_handle = open_write(file).__enter__() + self.current_block_size = 0 + return True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def closed(self): + return self.current_handle is None + + def close(self): + if self.current_handle is None: + return + + # log("Closing", self.current_handle.name, "with mode", self.current_handle.mode) + self.current_handle.__exit__(None, None, None) + self.current_handle = None + + +# not sure it helps since connections are reseted anyway. +_session = functools.lru_cache()(requests.Session) + + +def request_get_content(url: str, n_retry: int = 3) -> bytes: + """Retrieve the binary content at url. + + Retry on connection errors. + """ + t0 = time.time() + logging.info(f"Starting download of {url}") + for i in range(1, n_retry + 1): + try: + r = _session().get(url) + r.raise_for_status() + break + except requests.exceptions.RequestException as e: + # Sleep and try again on error, unless it's a 404. + message = e.args[0] if isinstance(e.args[0], str) else "" + if i == n_retry or "Client Error" in message: + raise e + warnings.warn( + f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})" + ) + time.sleep(10 * 2 ** i) + dl_time = time.time() - t0 + dl_speed = len(r.content) / dl_time / 1024 + logging.info( + f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)" + ) + return r.content + + +def open_remote_file(url: str, cache: Path = None) -> Iterable[str]: + """Download the files at the given url to memory and opens it as a file. + Assumes that the file is small, and fetch it when this function is called. + """ + if cache and cache.exists(): + return open_read(cache) + + # TODO: open the remote file in streaming mode. + # The hard part is that we need to write the content on disk at the same time, + # to implement disk caching. + raw_bytes = request_get_content(url) + content = io.BytesIO(raw_bytes) + if url.endswith(".gz"): + f: TextIO = gzip.open(content, mode="rt") # type: ignore + else: + f = io.TextIOWrapper(content) + + if cache and not cache.exists(): + # The file might have been created while downloading/writing. + tmp_cache = _tmp(cache) + tmp_cache.write_bytes(raw_bytes) + if not cache.exists(): + tmp_cache.replace(cache) + else: + tmp_cache.unlink() + + return _close_when_exhausted(f) + + +def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile: + folder, name = file_pattern.parent, file_pattern.name + assert "?" in name, f"Can't expand give file_pattern: {file_pattern}" + + n = name.count("?") + assert 0 < n < 8 + assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}" + assert "r" not in mode + files = (folder / name.replace("?" 
* n, f"%0{n}d" % i) for i in range(10 ** n)) + + return MultiFile(files, mode, max_size) + + +class SplitFile: + def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"): + assert mode == "r" + size = os.path.getsize(filename) + self.handle = open(filename, mode) + start = chunk * size // n_chunks + self.end: int = (chunk + 1) * size // n_chunks + + if start > 0: + self.handle.seek(start - 1) + # Skip incomplete line. This avoid crashing when reading eg the middle + # of a unicode char. `self.handle.buffer` is a binary file reader. + self.handle.buffer.readline() # type: ignore + + def __enter__(self): + return self + + def __iter__(self): + while True: + line = self.handle.readline() + if not line: + return + + yield line + if self.handle.tell() >= self.end: + return + + def readlines(self): + return list(self.__iter__()) + + def close(self): + self.handle.close() + + def __exit__(self, *args): + self.close() + + +def get_block_readers(filename: Path, n_readers, mode="t"): + index_filename = filename.parent / (filename.name + ".index") + if not index_filename.exists(): + return [gzip.open(filename, "r" + mode)] + index: List[int] = np.load(index_filename) + n_chunks = len(index) + chunk_per_reader = int(np.ceil(n_chunks / n_readers)) + n_readers = int(np.ceil(n_chunks / chunk_per_reader)) + + start = 0 + readers = [] + for i in range(n_readers): + end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)] + r = _blocked_gzip_reader(filename, start, end, mode) + readers.append(r) + start = end + return readers + + +def block_reader(filename: Path) -> Iterable[str]: + root, pattern = str(filename)[:-1].split("[", 1) + assert root.endswith(".gz"), "Can only read block of a .gz file for now." + + ii, nn = pattern.strip().split("/") + i, n_readers = int(ii), int(nn) + + index_filename = root + ".index" + assert os.path.exists( + index_filename + ), f"Index {index_filename} not found for {filename}" + index: List[int] = np.load(index_filename) + n_chunks = len(index) + chunk_per_reader = int(np.ceil(n_chunks / n_readers)) + n_readers = int(np.ceil(n_chunks / chunk_per_reader)) + # I'm not sure how to handle the case where there is less reader than expected. + # Currently we return empty readers. + + start = 0 + if i > 0: + start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)] + end = index[min(i * chunk_per_reader, n_chunks - 1)] + return _blocked_gzip_reader(root, start, end, mode="t") + + +def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]: + handle = gzip.open(filename, "r" + mode) + handle.seek(start) + try: + while handle.tell() < end: + line = handle.readline() + if not line: + break + yield line + finally: + handle.close() + + +class BlockedGzipWriter(MultiFile): + """Writes a Gzip files which can be read by block. + + Decreasing the block size may hurt compression, but provides more split points. 
+ """ + + def __init__(self, filename: Path, mode: str, block_size: str = "256M"): + assert "w" in mode + self.filename = Path(filename) + self.index: List[int] = [] + self.zipfile: Optional[gzip.GzipFile] = None + super().__init__([], mode, block_size) + + def _open_next_handle(self) -> bool: + """Here we never actually close/open handles, + we just write the end of block sequence.""" + if not self.current_handle: + mode = self.mode + "t" + self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode)) + assert isinstance(self.current_handle.buffer, gzip.GzipFile) + self.zipfile = self.current_handle.buffer + return True + + # Use Z_FULL_FLUSH to allow random access: + # https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313 + self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore + self.index.append(self.current_handle.tell()) + self.current_block_size = 0 + return True + + def flush(self): + assert self.current_handle is not None + self.current_handle.flush() + + def close(self): + if self.current_handle is None: + return + self.current_handle.flush() + self.index.append(self.current_handle.tell()) + self.current_handle.close() + self.current_handle = None + index = np.array(self.index, dtype=np.uint64) + with open(str(self.filename) + ".index", "wb") as o: + np.save(o, index) + + +def grouper(iterable, n): + group = [] + for x in iterable: + group.append(x) + if len(group) == n: + yield group + group = [] + if group: + yield group + + +PROCESS = psutil.Process() + + +def mem_footprint_gb(pid=None): + rss = PROCESS.memory_info().rss + return rss / 1_000_000_000 + + +def _tmp(output: Path) -> Path: + suffix = "".join(output.suffixes) + suffix = ".tmp" + suffix + prefix = output.name[: -len(suffix)] + _, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix) + return Path(tmp_path) + + +@functools.lru_cache() +def _tmp_dir() -> Path: + job_id = os.environ.get("SLURM_JOB_ID") + if job_id: + return Path("/scratch/slurm_tmpdir") / job_id + + checkpoint = Path("/checkpoint") / os.environ.get("USER", "") + if checkpoint.exists(): + tmp = checkpoint / "tmp" + tmp.mkdir(exist_ok=True) + return tmp + + return Path("/tmp") + + +if __name__ == "__main__": + multiprocessing.set_start_method("fork") + main(sys.argv[1:]) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/minify.py b/cc-multilingual-main/cc_net/build/lib/cc_net/minify.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5234ea77aecce6fbcff767f09845072657cb57 --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/minify.py @@ -0,0 +1,304 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import base64 +import hashlib +import itertools +import urllib.parse +from pathlib import Path +from typing import Dict, Iterable, List, Optional, Sequence, Set, Union + +import numpy as np + +from cc_net import jsonql +from cc_net.execution import get_executor +from cc_net.jsonql import mem_footprint_gb + +HASH_SIZE = 4 +HASH_TYPE = np.uint32 + +PUBLIC_FIELDS = ["url", "digest"] +COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"] +DATA = Path(__file__).parent.parent / "data" + + +# This is similar to dedup methods but with use 32 bits hashes. 
+def _b2i(b: bytes) -> int: + return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0) + + +def _str_hash(s: str) -> int: + h = hashlib.sha1(bytes(s, encoding="utf-8")) + return _b2i(h.digest()) + + +def get_hashes(lines: Iterable[str]) -> List[bytes]: + h = HASH_SIZE + return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines] + + +def encode_hashes(hashes: Iterable[bytes]) -> str: + return base64.b64encode(b"".join(hashes)).decode("ascii") + + +def encode_as_hashes(lines: Iterable[str]) -> str: + return encode_hashes(get_hashes(lines)) + + +def decode_hashes(compact: str) -> List[bytes]: + all_hashes = base64.b64decode(compact) + res = [] + assert len(all_hashes) % HASH_SIZE == 0 + for i in range(len(all_hashes) // HASH_SIZE): + chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE] + res.append(chunk) + + return res + + +def encode_line_ids(line_ids: Sequence[int]) -> str: + arr = np.array(line_ids, dtype=" List[int]: + ids_bytes = bytearray(base64.b64decode(compact)) + return np.ndarray(len(ids_bytes) // 2, dtype=" int: + assert digest.startswith("sha1:") + h = base64.b32decode(digest[5:]) + return _b2i(h[:HASH_SIZE]) + + +class Minifier(jsonql.Transformer): + ready = True + + def __init__(self): + self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS) + + def do(self, doc: dict) -> Optional[dict]: + line_ids: List[int] = doc.pop("line_ids") + fields = self.fields + keys = list(doc.keys()) + for k in keys: + if k not in fields: + doc.pop(k, None) + p = doc.get("perplexity", 0) + doc["line_ids"] = encode_line_ids(line_ids) + if p: + doc["perplexity"] = round(p, 1) + s = doc.get("language_score", 0) + if s: + doc["language_score"] = round(s, 2) + return doc + + +class MetadataFetcher(jsonql.Transformer): + """Reads documents from CC snapshot and join precomputed metadata. + + CC snapshots are split in segments. Each segment is 64Mb long. + The metadata must also be stored in segments of the same size and names. 
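+ Metadata is loaded one segment at a time (see `fetch_metadata`), keyed by a
+ 32-bit hash of each document's sha1 `digest` (see `get_doc_key`), and joined
+ back onto the raw WET documents in `do()` / `clean()`.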
+ """ + + def __init__(self, folder: Union[Path, str]): + self.ready = True + self.metadata: Dict[int, dict] = {} + + self._segments: Set[str] = set() + self.read_doc = 0 + self.missed_doc = 0 + self.missed_par = 0 + self.processed_par = 0 + + if isinstance(folder, str): + # detect path passed as string + if urllib.parse.urlparse(folder).scheme == "": + folder = Path(folder) + assert folder.exists(), f"Metadata folder not found: {folder}" + + self.folder = folder + self.segment: str = "" + self.segments_read_twice = 0 + + def meta_file(self, segment: str) -> str: + file_name = segment.split("/")[-1] + assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet") + if isinstance(self.folder, str): + return urllib.parse.urljoin( + self.folder, file_name.replace(".warc.wet", ".json") + ) + meta_file = self.folder / file_name.replace(".warc.wet", ".json") + assert ( + meta_file.exists() + ), f"Couldn't find metadata file for segment {segment} at {meta_file}" + return str(meta_file) + + def fetch_metadata(self, segment: str) -> None: + meta_file = self.meta_file(segment) + k = get_doc_key + self.metadata = {} + collision = 0 + for m in jsonql.read_jsons(meta_file): + key = k(m["digest"]) + if key in self.metadata: + collision += 1 + self.metadata[key] = m + + self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}") + if collision > 0: + self._logger.warning(f"Found {collision} collisions !") + + self.segment = segment + if segment in self._segments: + self.log("Cache miss") + self.segments_read_twice += 1 + self._segments.add(segment) + + def do(self, doc: dict) -> Optional[dict]: + if self.segment != doc["cc_segment"]: + self.fetch_metadata(doc["cc_segment"]) + digest = doc["digest"] + key = get_doc_key(digest) + if key not in self.metadata: + return None + + metadata = self.metadata.pop(key) + return self.clean(metadata, doc) + + def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]: + line_ids = decode_line_ids(metadata.pop("line_ids")) + lines = full_doc["raw_content"].split("\n") + cleaned = [] + for l in line_ids: + if l >= len(lines) or l < 0: + self.missed_par += 1 + continue + cleaned.append(lines[l]) + + self.processed_par += len(line_ids) + if not cleaned: + self.missed_doc += 1 + return None + + full_doc["raw_content"] = "\n".join(cleaned) + full_doc["original_nlines"] = full_doc["nlines"] + full_doc["original_length"] = full_doc["length"] + full_doc["nlines"] = len(cleaned) + full_doc["length"] = len(full_doc["raw_content"]) + for key, value in metadata.items(): + full_doc[key] = value + return full_doc + + def summary(self) -> List[str]: + summ = super().summary() + mem = mem_footprint_gb() + len_cache = len(self.metadata) + summ.append( + f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g." + ) + if self.missed_doc: + r = self.missed_doc / self.processed + summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !") + + if self.missed_par: + r = self.missed_par / self.processed + summ.append(f"! 
Missed {self.missed_par} paragraphs ({r:.1%}) !") + return summ + + +def _expand_files(files: List[Path]) -> List[Path]: + if len(files) == 1 and files[0].is_dir(): + folder = files[0] + files = sorted(folder.glob("*.json.gz")) + print(f"Found {len(files)} files under {folder}/*.json.gz") + assert files, "No files found" + return files + + +def minify_file(file: Path, output: Path) -> str: + """Minify the given file.""" + jsonql.run_pipes(Minifier(), file=file, output=output) + return f"Minified {output}" + + +def minify( + files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1 +): + """Minify all the files in the given folder.""" + files = _expand_files(files) + output_dir.mkdir(exist_ok=True) + with open(output_dir / "files.txt", "w") as o: + for f in files: + print(f.name, file=o) + outputs = [output_dir / f.name for f in files] + ex = get_executor( + "minify", + output_dir / "logs", + execution, + timeout_hour=2, + cpus=1, + task_parallelism=parallelism, + ) + ex(minify_file, files, outputs) + + +def fetch_metadata_file( + file: Union[Path, str], + metadata_dir: Union[Path, str], + output: Path, + cache_dir: Path = None, +): + unminifier = MetadataFetcher(metadata_dir) + tmp = output.with_name("tmp." + output.name) + jsonql.run_pipes(unminifier, file=file, output=tmp) + tmp.rename(output) + return f"Fetched metadata for {file}. Results at {output}." + + +def fetch_metadata( + files: List[str], + metadata_dir: Union[Path, str], + output_dir: Path, + execution: str = "mp", + parallelism: int = -1, + cache_dir: Path = None, +): + if len(files) == 1 and Path(files[0]).is_dir(): + folder = Path(files[0]) + files = [str(f) for f in sorted(folder.glob("*.json.gz"))] + print(f"Found {len(files)} files under {folder}/*.json.gz") + + assert len(files) > 0, "No files given." + output_dir.mkdir(exist_ok=True) + + outputs = [output_dir / str(f).split("/")[-1] for f in files] + if cache_dir is None: + cache_dir = output_dir / "wet_cache" + cache_dir.mkdir(exist_ok=True) + if str(cache_dir) == "none": + cache_dir = None + files = [f for f, o in zip(files, outputs) if not o.exists()] + outputs = [o for o in outputs if not o.exists()] + if not files: + return + ex = get_executor( + "unminify", + output_dir / "logs", + execution, + timeout_hour=8, + cpus=1, + task_parallelism=parallelism, + mem_gb=32, + ) + ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir)) + + +if __name__ == "__main__": + import func_argparse + + func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/split_by_lang.py b/cc-multilingual-main/cc_net/build/lib/cc_net/split_by_lang.py new file mode 100644 index 0000000000000000000000000000000000000000..e8c5c6224547d6372e936933d2387c5d3e86543c --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/split_by_lang.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
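For orientation, the `Classifier` in this file wraps a fastText language-identification model: text is flattened to a single line before scoring and fastText's `__label__` prefix is stripped from the returned labels. A hedged usage sketch of that call pattern — the model path is a placeholder, not a file shipped with this repository:

import fasttext  # type: ignore

# Placeholder path: point this at a real fastText language-id model (.bin).
model = fasttext.load_model("path/to/lid.bin")


def detect_language(text: str, k: int = 1):
    # fastText scores one line at a time, hence the newline stripping,
    # mirroring the module-level predict() and Classifier.predict below.
    labels, scores = model.predict(text.replace("\n", ""), k=k)
    return [label.replace("__label__", "") for label in labels], scores


print(detect_language("Bonjour tout le monde", k=2))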
+ + +import argparse +import collections +from pathlib import Path +from typing import Dict, Optional + +import fasttext # type: ignore + +from cc_net import jsonql + + +def get_args(): + parser = argparse.ArgumentParser( + description="Read a list of json files and split them ", + parents=[jsonql.io_parser()], + ) + parser.add_argument("--pattern", type=str) + parser.add_argument("--field", type=str, default="raw_content") + parser.add_argument("--threshold", type=float, default=0) + parser.add_argument("--model", type=str, required=True) + parser.add_argument("--out_field", type=str, default="language") + parser.add_argument("--top", type=int, default=1) + return vars(parser.parse_args()) + + +def predict(model, text: str, k: int = 1): + labels, scores = model.predict(text, k=k) + labels = [l.replace("__label__", "") for l in labels] + return labels, scores + + +def avg_predict(model, text): + # Overall gives the same results than predict(model, text.replace("\n", "")) + text = text.split("\n") + text_len = sum(len(line) for line in text) + if text_len == 0: + return None, 0 + scores = [predict(model, line) for line in text] + scores_by_label: Dict[str, float] = collections.defaultdict(float) + for (label, score), line in zip(scores, text): + scores_by_label[label] += score * len(line) + + label, score = max(scores_by_label.items(), key=lambda kv: kv[1]) + return label, score / text_len + + +class Classifier(jsonql.Transformer): + def __init__( + self, + model: Path, + field: str, + out_field: str, + threshold: float = 0, + top: int = 1, + language: str = None, + rounding: int = 2, + ): + super().__init__() + self.model = model + assert model.exists(), f"Model {model} doesn't exist." + self.field = field + self.out_field = out_field + self.threshold = threshold + self.top = top + self.language = language + self.rounding = rounding + # Fasttext model is a C object and can't be pickled + self.fasttext_model: fasttext._FastText = None + self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0 + self.cnt: Dict[str, int] = {} + + def _prepare(self): + self.log(f"Loading {self.model}") + self.fasttext_model = fasttext.load_model(str(self.model)) + + def predict(self, text): + return predict(self.fasttext_model, text.replace("\n", ""), k=self.top) + + def do(self, doc: dict) -> Optional[dict]: + text = doc.get(self.field, None) + if not text: + return None + + if self.language and doc.get("language") != self.language: + self.n_ignored += 1 + return doc + + self.n_doc += 1 + labels, scores = self.predict(text) + scores.round(self.rounding, out=scores) + for l in labels: + self.cnt[l] = self.cnt.get(l, 0) + 1 + + if self.top == 1: + existing_label = doc.get(self.out_field, None) + if existing_label and labels[0] != existing_label: + self.n_disagreement += 1 + + if all(s < self.threshold for s in scores): + return None + + self.n_accepted += 1 + if self.top == 1: + doc[self.out_field] = labels[0] + doc[self.out_field + "_score"] = scores[0] + else: + doc[self.out_field] = {l: s for l, s in zip(labels, scores)} + return doc + + def summary(self): + n_doc, n_accepted, n_disagreement, cnt, out_field = ( + self.n_doc, + self.n_accepted, + self.n_disagreement, + self.cnt, + self.out_field, + ) + summ = super().summary() + if self.threshold > 0: + ratio = n_accepted / n_doc if n_doc else 0 + summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})") + summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}") + + disagreement = n_disagreement / n_doc if n_doc else 0 + if 
disagreement: + summ.append(f"{out_field} disagreement is at {disagreement:.1%}.") + return summ + + def __repr__(self): + return f"Classifier({self.model})" + + +def classify_and_split(file, output, pattern, **kwargs): + classifier = Classifier(**kwargs) + splitter = jsonql.split(pattern) + jsonql.run_pipes(classifier, splitter, file=file, output=output) + + +if __name__ == "__main__": + args = get_args() + pattern = args.get("pattern") + if pattern: + classify_and_split(**args) + else: + args.pop("pattern") + jsonql.run_pipe(Classifier, args) diff --git a/cc-multilingual-main/cc_net/build/lib/cc_net/tokenizer.py b/cc-multilingual-main/cc_net/build/lib/cc_net/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c48c8a45a8bc31ea98b3b0eb49ac12298185c634 --- /dev/null +++ b/cc-multilingual-main/cc_net/build/lib/cc_net/tokenizer.py @@ -0,0 +1,79 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import time +from typing import Dict, Optional + +import sacremoses # type: ignore + +from cc_net import jsonql, text_normalizer + + +class RobustTokenizer(jsonql.Transformer): + """Moses tokenizer with the expected preprocessing.""" + + LANG_WITHOUT_ACCENT = {"en", "my"} + + def __init__(self, lang: str): + super().__init__() + self.lang = lang + self.moses = sacremoses.MosesTokenizer(lang) + self.rm_accent = lang in self.LANG_WITHOUT_ACCENT + self.ready = True + + def do(self, text: str): + text = text_normalizer.normalize( + text, accent=self.rm_accent, case=False, numbers=False, punct=True + ) + text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang) + return self.moses.tokenize(text, return_str=True, escape=False) + + +class DocTokenizer(jsonql.Transformer): + """Tokenize the text found in `output_field and store the result in `output_field`.""" + + def __init__( + self, + field: str, + output_field: str = "tokenized", + language_field: str = "language", + ): + super().__init__() + self.field = field + self.output_field = output_field + self.language_field = language_field + self.n_docs = 0 + self.tokenizers: Dict[str, RobustTokenizer] = {} + + def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]: + cache = self.tokenizers + if lang in cache: + return cache[lang] + if lang in ("th", "zh", "ja"): + # TODO find a tokenizer for those languages + return None + + cache[lang] = RobustTokenizer(lang) + return cache[lang] + + def do(self, document): + lang = document[self.language_field] + tok = self.get_tokenizer(lang) + if not tok: + return document + + self.n_docs += 1 + lines = document[self.field].split("\n") + tokenized = "\n".join(tok(l) for l in lines) + document[self.output_field] = tokenized + return document + + def summary(self): + delay = (time.time() - self.start_time) / 3600 + speed = self.n_docs / delay + return [ + f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)." + ] diff --git a/cc-multilingual-main/cc_net/cc_net/__init__.py b/cc-multilingual-main/cc_net/cc_net/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..602d26857d421a1161d92b669da1739f292f6c96 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# diff --git a/cc-multilingual-main/cc_net/cc_net/__init__.pyc b/cc-multilingual-main/cc_net/cc_net/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80360568924f7b9fce54fa7d32a2fe5e720109cf Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__init__.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__main__.py b/cc-multilingual-main/cc_net/cc_net/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..23508400a7c2e09c54e53b6ab00ed857a3555042 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/__main__.py @@ -0,0 +1,18 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + + +import func_argparse + +import cc_net.mine + + +def main(): + func_argparse.parse_and_call(cc_net.mine.get_main_parser()) + + +if __name__ == "__main__": + main() diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58891c12e3ba5d5d59184978ddc9ee8959851e7a Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-312.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..792298f61276e7bc12903a5518b70b042408b1d8 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-312.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aadf6481b8ce5e318b3307b8ba6a967726299daf Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__init__.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d3404d7be4d73ed80c37bd69e5bb101aec40bfe Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-312.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bacd43dbafe010f2ce0de0ae9cd6af70b5eb3c3 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-312.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca21f7da53a428823817a56d111b383e7e41de47 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/__main__.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e837340336d547c429bfbd1ba8d7d8d35d069298 Binary files /dev/null and 
b/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7de7fcd4e9f929b048ad4e839b391cb0b6662c9 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/dedup.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8c23c262a195fec5e77b33afe9ea02e505a6f03 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8e26de71e12e482150fb9c2e1d55564eddbcecf Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/execution.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a829134d387aa52be41f8d16ee7c3c723ba312f Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f5627fed0c24f3971372d673b4ffcd880afafa9 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/flat_hash_set.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec638fd9be40e9453507ea542328f774ce294407 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e43babca11b9f1b335c605330d66c11f50dd3c Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/jsonql.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81b7575ea423d0cea2df5238debf4c728dd433b8 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e7e4d63a4b47bf0bde4dc91d15906f702a53aac Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/mine.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-310.pyc 
b/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16fccaaee3c233c0811a5e25c348fec76f0bce6e Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beda2608baf0f1fb0fc247ebab464f0d26874a54 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/minify.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e69ae0a8684cd46d3c509efd0a4eb765943491c Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f83eaa313f19451edf44d25c35c5f68ddab1657 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/perplexity.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31bd1f75169ed39d00a858ca27a8acded1f1bfc6 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e1a5dc11293d4bb32b236795f25a103d22ee94 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/process_wet_file.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac0851fca047e1fb3832807006f0399b8ccc17d0 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18ae72906835d18d40005a581c447859184b913a Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/regroup.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1ee723f4d718db70599354852b3721e27ef20c8 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0f58c630081e44c12ef32f34f3d8a1b6787374d7 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/split_by_lang.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-310.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc8c13ee6eb6f562fe97270571f8f1c55ba14602 Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-310.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-38.pyc b/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bfed3f48109dde2e4c272c6b52cd0806415891d Binary files /dev/null and b/cc-multilingual-main/cc_net/cc_net/__pycache__/text_normalizer.cpython-38.pyc differ diff --git a/cc-multilingual-main/cc_net/cc_net/break.ipynb b/cc-multilingual-main/cc_net/cc_net/break.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/cc-multilingual-main/cc_net/cc_net/data/cutoff.csv b/cc-multilingual-main/cc_net/cc_net/data/cutoff.csv new file mode 100644 index 0000000000000000000000000000000000000000..f283fc7337d7bed6710482e30c3ee348a3ae4428 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/data/cutoff.csv @@ -0,0 +1,101 @@ +,de,it,fr,nl,pl,pt,es,no,da,id,lt,fi,en,hu,ro,ko,ar,bn,fa,ru,uk,ml,my,lv,is,ca,ne,et,hy,ja,hr,hi,az,el,cs,bg,he,zh,ka,km,gu,mk,kn,mr,af,mn,kk,be +0,0,0,0,10,0,0,0,0,0,0,0,10,0,0,0,20,0,0,0,0,0,0,10,10,10,0,10,0,0,0,0,0,0,0,0,0,0,10,0,10,10,10,10,10,10,0,0,10 +1,150,100,70,170,50,70,70,170,160,210,70,400,160,30,70,20,100,70,110,70,40,480,550,430,410,80,330,470,210,400,290,190,130,210,270,120,400,600,230,410,510,190,730,460,130,240,50,170 +2,170,110,90,200,70,80,90,210,190,260,90,490,180,30,80,30,180,80,150,90,50,740,840,570,490,90,420,630,300,540,360,220,200,250,350,180,520,750,360,460,580,250,1020,630,190,300,110,250 +3,190,120,90,230,70,90,90,230,210,300,100,570,200,40,90,30,220,90,180,110,60,930,980,650,530,100,470,720,360,620,400,240,240,270,410,220,620,840,420,490,630,280,1170,720,220,330,150,300 +4,200,130,100,240,80,90,100,250,220,320,110,620,210,40,100,30,250,100,190,110,60,1080,1140,710,560,110,510,780,400,670,440,260,270,280,450,230,690,900,480,530,670,300,1280,790,260,360,190,340 +5,210,130,100,260,90,100,110,260,230,340,110,670,220,40,110,30,260,100,210,120,70,1210,1260,760,580,120,540,830,430,710,470,270,300,290,480,250,760,950,520,550,690,320,1370,840,290,380,210,370 +6,220,140,110,270,90,100,110,270,240,350,120,700,230,40,110,30,280,110,220,130,70,1310,1390,790,600,120,570,870,450,750,490,280,320,300,510,270,810,990,560,560,720,340,1440,880,310,400,240,390 +7,230,140,110,280,100,110,120,280,250,370,120,740,230,50,120,30,290,110,230,140,70,1400,1500,820,620,130,590,900,480,770,510,300,340,310,540,290,850,1030,580,570,740,350,1500,920,330,410,270,410 +8,230,150,110,290,100,110,120,290,260,380,130,770,240,50,120,40,310,120,240,140,80,1470,1590,840,640,130,610,930,500,800,530,310,360,320,560,300,880,1060,610,600,760,370,1550,950,350,430,280,430 +9,240,150,120,300,100,110,120,300,270,400,130,790,250,50,120,40,320,120,240,150,80,1540,1660,860,650,140,630,960,520,820,540,310,380,330,580,310,910,1090,630,610,780,380,1600,990,370,450,310,450 
+10,250,160,120,310,110,120,130,310,270,410,140,810,250,50,130,40,330,130,250,150,90,1600,1740,880,660,140,650,980,530,840,550,320,390,340,600,320,940,1110,650,620,800,390,1640,1010,380,460,330,470 +11,250,160,120,310,110,120,130,310,280,420,140,830,260,50,130,60,340,130,260,150,90,1650,1810,900,680,150,660,1000,550,860,570,330,410,350,610,330,970,1140,670,640,820,400,1680,1040,390,480,350,480 +12,260,160,130,320,110,120,130,320,290,430,140,850,260,50,130,70,350,130,270,160,90,1700,1870,920,690,150,680,1020,570,880,580,340,420,350,630,340,990,1160,690,660,840,410,1720,1060,400,490,370,500 +13,270,170,130,330,120,130,140,330,290,440,150,870,270,60,140,80,360,140,270,160,90,1750,1930,930,700,150,690,1030,580,890,590,350,440,360,640,340,1010,1180,710,680,860,420,1760,1090,410,500,380,510 +14,270,170,130,340,120,130,140,340,300,450,150,890,270,60,140,110,370,140,280,170,100,1800,1990,950,710,160,700,1050,590,910,600,360,450,370,650,350,1030,1200,730,700,880,430,1790,1110,420,510,400,520 +15,280,170,140,340,120,130,140,340,300,460,150,900,280,60,140,110,380,140,290,170,100,1850,2040,960,720,160,720,1070,600,920,610,360,460,370,670,360,1050,1220,740,710,890,430,1820,1130,430,520,420,540 +16,280,180,140,350,120,130,140,350,310,470,160,920,280,60,150,120,380,150,290,170,100,1890,2080,980,730,160,730,1080,620,940,620,370,470,380,680,360,1070,1240,750,740,910,440,1850,1150,440,530,440,550 +17,290,180,140,360,130,140,150,350,320,480,160,940,280,60,150,120,390,150,300,180,110,1940,2120,990,740,170,740,1100,630,950,630,380,480,380,690,370,1090,1260,770,760,920,450,1880,1170,450,550,450,560 +18,300,180,150,360,130,140,150,360,320,490,160,950,290,60,150,160,400,150,300,180,110,1980,2170,1000,750,170,750,1110,640,970,640,390,490,390,700,380,1100,1270,780,770,940,460,1910,1190,450,560,470,570 +19,300,190,150,370,130,140,150,370,330,500,160,970,290,70,150,230,410,160,310,180,110,2030,2200,1010,760,170,760,1130,650,980,640,390,500,390,710,380,1120,1290,790,790,950,460,1940,1210,460,570,480,580 +20,310,190,150,370,130,140,150,370,330,510,170,980,300,70,160,330,420,160,310,180,110,2070,2240,1030,770,180,770,1140,660,990,650,400,510,400,720,390,1140,1310,800,810,970,470,1970,1220,470,580,490,590 +21,310,190,150,380,140,150,160,380,340,520,170,990,300,70,160,370,420,160,320,190,120,2110,2270,1040,770,180,780,1160,670,1010,660,410,520,400,740,400,1150,1320,820,830,980,480,1990,1240,480,590,510,600 +22,320,200,160,390,140,150,160,380,340,530,170,1010,300,70,160,450,430,170,330,190,120,2160,2300,1050,780,180,790,1170,670,1020,670,410,530,410,750,400,1170,1340,830,850,1000,480,2020,1260,490,600,520,610 +23,320,200,160,390,140,150,160,390,350,540,170,1020,310,70,160,600,440,170,330,190,120,2190,2340,1060,790,190,800,1180,680,1030,680,420,540,420,760,410,1180,1350,840,870,1010,490,2040,1270,490,610,530,620 +24,330,200,160,400,140,150,160,400,350,540,170,1030,310,70,170,670,450,170,340,200,130,2230,2360,1070,800,190,810,1190,690,1040,680,430,550,420,770,410,1200,1370,850,890,1020,500,2060,1290,500,620,550,630 +25,340,210,170,400,150,160,170,400,360,550,180,1050,320,80,170,740,460,170,340,200,130,2270,2390,1080,810,190,820,1210,700,1050,690,440,560,430,780,420,1210,1380,860,910,1040,500,2090,1300,510,630,560,640 +26,340,210,170,410,150,160,170,410,360,560,180,1060,320,80,170,790,460,180,350,200,130,2300,2420,1090,820,190,830,1220,710,1060,700,440,570,430,780,420,1220,1400,870,930,1050,510,2110,1320,510,640,570,640 
+27,350,210,170,420,150,160,170,410,370,570,180,1070,320,80,170,840,470,180,350,200,130,2340,2450,1100,830,200,840,1230,720,1080,700,450,580,440,790,430,1240,1410,880,960,1070,510,2140,1330,520,650,580,650 +28,350,220,180,420,150,160,170,420,370,580,180,1090,330,80,180,840,480,180,360,210,140,2370,2470,1110,840,200,850,1240,730,1090,710,460,580,440,800,440,1250,1430,890,990,1080,520,2160,1350,530,660,590,660 +29,360,220,180,430,160,160,180,430,380,590,190,1100,330,80,180,890,490,190,370,210,140,2400,2500,1120,850,200,860,1250,740,1100,720,470,590,450,810,440,1270,1440,900,1010,1100,520,2180,1370,530,670,600,670 +30,370,220,180,430,160,170,180,430,380,600,190,1110,340,80,180,920,490,190,370,210,140,2430,2530,1130,850,210,860,1270,750,1110,720,480,600,450,820,450,1280,1460,910,1040,1110,530,2210,1380,540,680,610,670 +31,370,230,190,440,160,170,180,440,390,610,190,1120,340,80,180,920,500,190,380,220,140,2470,2550,1140,860,210,870,1280,750,1120,730,480,610,460,830,460,1290,1470,920,1070,1120,540,2230,1400,550,690,620,680 +32,380,230,190,450,160,170,180,440,390,620,190,1140,350,90,180,920,510,200,390,220,150,2510,2570,1150,870,210,880,1290,760,1130,740,490,620,460,840,460,1310,1490,930,1100,1140,540,2250,1410,550,700,630,690 +33,380,230,190,450,170,170,190,450,400,630,190,1150,350,90,190,940,520,200,390,220,150,2540,2590,1160,880,220,890,1300,770,1140,740,500,630,470,850,470,1320,1500,940,1120,1150,550,2270,1430,560,710,650,700 +34,390,240,200,460,170,180,190,460,410,640,200,1160,350,90,190,940,530,200,400,220,150,2570,2610,1170,890,220,900,1310,780,1150,750,510,640,470,860,470,1330,1520,960,1150,1160,550,2290,1440,570,720,660,700 +35,400,240,200,460,170,180,190,460,410,640,200,1170,360,90,190,940,530,210,410,230,150,2590,2630,1180,900,220,910,1320,790,1160,760,520,650,480,870,480,1350,1530,970,1180,1180,560,2310,1460,580,730,670,710 +36,400,240,200,470,170,180,190,470,420,660,200,1180,360,90,190,1010,540,210,410,230,160,2620,2650,1190,910,230,920,1330,800,1170,760,520,660,480,880,490,1360,1540,980,1210,1190,560,2330,1470,580,740,680,720 +37,410,250,210,480,180,190,200,480,420,670,200,1200,370,90,200,1010,550,210,420,230,160,2650,2660,1200,920,230,930,1340,810,1180,770,530,670,490,890,490,1370,1560,990,1240,1200,570,2350,1490,590,750,690,730 +38,410,250,210,480,180,190,200,480,430,680,210,1210,370,100,200,1020,560,210,430,230,160,2680,2680,1210,930,230,930,1350,820,1190,770,540,680,500,900,500,1390,1570,1000,1270,1220,580,2370,1500,600,760,700,730 +39,420,260,210,490,180,190,200,490,440,690,210,1220,380,100,200,1020,570,220,440,240,160,2710,2700,1220,930,240,940,1360,830,1200,780,550,690,500,910,510,1400,1590,1010,1300,1230,580,2390,1520,600,770,710,740 +40,430,260,220,490,190,190,210,500,440,700,210,1230,380,100,200,1020,570,220,440,240,170,2740,2720,1240,940,240,950,1370,840,1210,790,560,700,510,920,510,1410,1610,1020,1330,1250,590,2410,1540,610,780,720,750 +41,430,260,220,500,190,200,210,500,450,710,210,1240,390,100,210,1050,580,220,450,240,170,2770,2740,1250,950,240,960,1380,850,1230,790,570,710,510,930,520,1430,1620,1030,1360,1260,600,2430,1550,620,790,730,760 +42,440,270,220,510,190,200,210,510,450,720,210,1260,390,100,210,1050,590,230,460,250,170,2800,2760,1260,960,250,970,1390,860,1240,800,580,720,520,940,530,1440,1640,1040,1400,1270,600,2450,1570,630,800,740,770 +43,450,270,230,510,190,200,220,520,460,730,220,1270,400,110,210,1050,600,230,470,250,170,2820,2770,1270,970,250,980,1410,860,1250,800,590,730,530,940,530,1450,1650,1050,1430,1290,610,2470,1580,630,810,750,770 
+44,450,280,230,520,200,210,220,530,470,740,220,1280,400,110,220,1050,610,230,480,250,180,2840,2790,1280,980,250,980,1420,870,1260,810,600,740,530,950,540,1470,1670,1070,1470,1300,610,2490,1600,640,820,750,780 +45,460,280,240,530,200,210,220,530,480,750,220,1290,410,110,220,1180,620,240,490,260,180,2870,2800,1290,990,260,990,1430,890,1270,820,610,760,540,970,550,1480,1690,1080,1510,1320,620,2510,1610,650,830,770,790 +46,470,280,240,530,200,210,230,540,480,760,220,1310,420,110,220,1180,630,240,500,260,180,2900,2820,1300,1000,260,1000,1440,900,1280,820,620,770,550,980,550,1500,1700,1090,1550,1330,630,2530,1630,650,840,780,790 +47,470,290,250,540,210,220,230,550,490,780,230,1320,420,110,220,1260,640,250,510,260,180,2930,2840,1310,1010,270,1010,1450,910,1290,830,630,780,550,990,560,1510,1720,1100,1580,1350,630,2540,1640,660,850,790,800 +48,480,290,250,550,210,220,230,560,500,790,230,1330,430,120,230,1410,650,250,520,270,190,2950,2850,1320,1020,270,1020,1460,920,1300,840,640,790,560,1000,570,1530,1740,1120,1620,1360,640,2570,1660,670,860,800,810 +49,490,300,260,560,210,220,240,570,500,800,230,1340,440,120,230,1430,660,250,530,270,190,2970,2870,1330,1030,270,1030,1470,930,1310,840,650,800,570,1010,580,1540,1750,1130,1650,1370,650,2580,1670,680,880,810,820 +50,500,300,260,560,220,230,240,570,510,810,230,1360,440,120,230,1540,670,260,550,270,190,3000,2880,1350,1050,280,1040,1480,940,1330,850,660,820,580,1020,590,1560,1770,1140,1690,1390,660,2600,1690,680,890,820,830 +51,500,310,270,570,220,230,250,580,520,830,240,1370,450,120,240,1560,680,260,560,280,200,3020,2900,1360,1060,280,1050,1500,950,1340,850,670,830,580,1030,600,1570,1790,1160,1730,1410,660,2620,1710,690,900,830,830 +52,510,310,270,580,220,230,250,590,530,840,240,1380,460,120,240,1610,690,270,570,280,200,3040,2910,1370,1070,290,1050,1510,970,1350,860,680,840,590,1040,600,1590,1810,1180,1780,1420,670,2640,1720,700,910,840,840 +53,520,320,280,590,230,240,250,600,540,850,240,1400,460,130,240,1700,700,270,580,280,200,3070,2930,1390,1090,290,1060,1520,980,1360,870,700,850,600,1050,610,1600,1830,1190,1820,1440,680,2660,1740,710,920,850,850 +54,530,320,280,590,230,240,260,610,550,870,250,1410,470,130,250,1730,710,280,600,290,200,3090,2940,1400,1100,300,1070,1540,990,1370,870,710,870,610,1060,620,1610,1840,1210,1870,1460,690,2680,1760,710,930,860,860 +55,540,330,290,600,240,250,260,620,560,880,250,1430,480,130,250,1800,720,280,620,290,210,3110,2960,1410,1120,300,1080,1550,1000,1380,880,720,880,620,1080,630,1630,1860,1220,1910,1480,700,2700,1780,720,950,870,860 +56,550,340,300,610,240,250,270,630,560,900,250,1440,490,130,250,1850,730,280,630,300,210,3130,2980,1430,1130,300,1090,1560,1020,1400,890,740,890,630,1090,640,1650,1880,1240,1960,1490,700,2720,1790,730,960,880,870 +57,560,340,300,620,240,260,270,640,570,910,250,1450,500,140,260,1950,750,290,650,300,210,3150,2990,1440,1150,310,1100,1580,1030,1410,890,750,900,640,1100,650,1660,1900,1260,2020,1510,710,2740,1810,740,970,900,880 +58,570,350,310,630,250,260,280,660,580,930,260,1470,510,140,260,1950,760,290,670,300,220,3170,3010,1450,1160,310,1110,1590,1050,1420,900,760,920,650,1110,660,1680,1920,1270,2070,1530,720,2760,1830,750,990,910,890 +59,580,350,320,640,250,260,280,670,590,950,260,1480,510,140,260,2040,770,300,680,310,220,3200,3020,1470,1170,320,1120,1600,1060,1440,910,780,930,660,1130,670,1700,1940,1290,2130,1550,730,2780,1850,760,1000,920,900 
+60,590,360,330,650,260,270,290,680,600,970,270,1500,520,140,270,2130,790,300,700,310,220,3220,3040,1480,1190,330,1130,1620,1080,1450,910,790,950,670,1140,680,1710,1950,1310,2180,1570,740,2800,1870,770,1010,930,910 +61,600,370,340,660,260,270,290,690,620,980,270,1510,530,150,270,2210,800,310,720,320,230,3240,3050,1500,1210,330,1140,1630,1090,1460,920,810,960,680,1160,690,1730,1970,1330,2230,1580,750,2820,1890,780,1030,940,920 +62,610,370,340,670,270,280,300,710,630,1000,270,1530,550,150,280,2280,810,320,740,320,230,3260,3060,1510,1220,340,1160,1650,1110,1470,930,830,980,690,1170,700,1750,2000,1350,2290,1600,760,2840,1920,790,1040,950,920 +63,620,380,350,680,270,290,300,720,640,1020,280,1550,560,150,280,2290,830,320,770,330,230,3280,3080,1530,1240,340,1170,1660,1130,1490,930,840,990,700,1190,720,1770,2020,1370,2350,1620,770,2860,1940,800,1060,970,930 +64,630,390,360,690,280,290,310,730,650,1040,280,1560,570,160,290,2310,850,330,790,330,240,3300,3090,1540,1260,350,1180,1680,1150,1500,940,860,1010,720,1200,730,1790,2040,1400,2400,1650,780,2880,1960,810,1080,980,950 +65,640,400,370,700,280,300,320,750,670,1060,280,1580,580,160,290,2380,860,330,810,340,240,3320,3110,1560,1280,360,1190,1690,1170,1520,950,880,1020,730,1220,740,1810,2060,1420,2460,1670,790,2900,1990,820,1100,990,950 +66,660,410,380,710,290,300,320,770,680,1090,290,1600,600,160,300,2400,880,340,840,340,240,3340,3120,1580,1310,360,1200,1710,1190,1530,960,900,1030,740,1230,750,1830,2090,1450,2510,1690,810,2920,2010,830,1120,1010,960 +67,670,410,400,730,300,310,330,780,690,1110,290,1620,610,170,300,2420,900,350,870,350,250,3360,3140,1600,1330,370,1210,1730,1210,1550,960,920,1050,760,1250,770,1850,2120,1480,2570,1710,820,2950,2030,840,1130,1020,970 +68,680,420,410,740,300,320,340,800,710,1130,300,1640,630,170,310,2450,920,360,890,350,250,3380,3150,1620,1350,380,1230,1740,1230,1570,970,940,1070,770,1270,780,1870,2140,1510,2620,1740,830,2970,2050,850,1150,1030,980 +69,700,430,420,750,310,320,340,820,720,1150,300,1650,640,170,310,2490,940,360,920,360,260,3400,3170,1640,1380,390,1240,1760,1250,1580,980,960,1080,790,1290,790,1890,2170,1540,2670,1760,850,2990,2080,870,1180,1050,990 +70,710,440,430,770,320,330,350,840,740,1180,310,1670,660,180,320,2500,960,370,950,370,260,3420,3190,1660,1400,400,1250,1780,1270,1600,990,980,1100,800,1300,810,1910,2200,1570,2710,1790,860,3010,2100,880,1200,1060,1000 +71,730,460,450,780,320,340,360,860,760,1200,310,1690,680,180,320,2570,980,380,980,370,270,3440,3210,1680,1430,400,1270,1800,1290,1620,990,1010,1120,820,1320,830,1940,2230,1600,2760,1810,880,3030,2130,890,1220,1080,1010 +72,740,470,460,800,330,350,370,890,770,1230,320,1710,700,190,330,2660,1000,390,1010,380,270,3460,3220,1700,1460,410,1280,1820,1320,1640,1000,1030,1140,840,1340,840,1960,2260,1640,2810,1840,890,3060,2160,900,1250,1100,1030 +73,760,480,480,810,340,350,380,910,790,1260,330,1740,720,190,340,2730,1030,400,1050,390,280,3480,3240,1730,1490,420,1300,1840,1340,1660,1010,1060,1170,860,1360,860,1990,2290,1670,2860,1870,910,3080,2190,920,1270,1110,1040 +74,780,500,500,830,350,360,390,940,810,1290,330,1760,740,200,340,2850,1050,410,1080,400,280,3500,3260,1750,1520,430,1320,1860,1360,1680,1020,1080,1190,880,1390,880,2010,2320,1710,2900,1900,930,3110,2220,930,1300,1130,1050 +75,800,510,520,850,360,370,400,970,830,1320,340,1780,770,200,350,2930,1070,420,1110,400,290,3520,3280,1780,1560,440,1330,1880,1390,1710,1030,1110,1220,900,1410,900,2030,2350,1760,2940,1930,950,3130,2250,950,1330,1150,1060 
+76,820,530,530,870,370,380,410,1000,860,1350,350,1810,800,210,360,2980,1100,440,1150,410,290,3540,3290,1800,1600,460,1350,1900,1420,1730,1040,1150,1240,920,1430,920,2060,2390,1800,2980,1940,970,3160,2280,970,1360,1170,1070 +77,840,550,550,890,380,390,430,1030,880,1380,350,1830,830,210,360,2990,1130,450,1190,420,300,3560,3310,1830,1630,470,1370,1930,1450,1750,1060,1180,1260,950,1460,940,2090,2420,1850,3020,1980,990,3190,2320,980,1400,1190,1090 +78,860,570,570,910,390,400,440,1070,910,1420,360,1860,860,220,370,3080,1160,470,1230,430,310,3580,3330,1860,1670,480,1390,1950,1480,1780,1070,1220,1290,970,1490,960,2120,2460,1900,3060,2010,1010,3210,2350,1000,1430,1210,1100 +79,890,590,600,930,400,410,450,1110,940,1460,370,1880,890,220,380,3170,1200,480,1270,440,310,3600,3350,1890,1720,500,1400,1980,1510,1810,1080,1260,1320,1000,1520,990,2150,2500,1950,3100,2050,1030,3240,2390,1030,1470,1230,1120 +80,920,620,630,960,410,420,470,1150,970,1500,380,1910,930,230,390,3210,1230,500,1320,450,320,3620,3370,1920,1760,510,1430,2010,1540,1830,1090,1300,1350,1030,1550,1010,2180,2540,2020,3140,2100,1060,3260,2420,1050,1510,1250,1130 +81,950,640,660,990,430,440,480,1200,1000,1540,390,1940,970,240,410,3290,1260,520,1370,460,330,3640,3390,1960,1810,520,1450,2040,1580,1860,1110,1340,1390,1060,1580,1040,2220,2590,2080,3180,2140,1090,3290,2460,1070,1540,1280,1150 +82,980,670,700,1010,440,450,500,1260,1030,1580,400,1980,1010,250,420,3370,1300,540,1430,480,340,3660,3410,1990,1860,540,1470,2070,1610,1890,1120,1390,1430,1100,1620,1070,2250,2630,2160,3230,2190,1120,3320,2500,1100,1590,1310,1170 +83,1010,710,740,1050,450,460,520,1320,1070,1630,410,2010,1060,260,430,3420,1340,570,1490,490,350,3680,3430,2030,1920,560,1500,2110,1640,1930,1140,1440,1470,1130,1650,1100,2290,2680,2230,3260,2240,1150,3350,2540,1130,1640,1340,1190 +84,1050,750,780,1080,470,480,540,1390,1110,1690,420,2050,1110,260,450,3460,1390,590,1550,510,360,3690,3460,2080,1990,580,1530,2150,1680,1960,1150,1490,1520,1170,1690,1140,2330,2730,2320,3310,2290,1190,3380,2590,1170,1690,1370,1210 +85,1100,800,830,1120,490,500,560,1470,1160,1740,440,2090,1170,270,470,3540,1440,630,1620,520,370,3710,3480,2120,2060,600,1560,2190,1730,2000,1170,1550,1570,1220,1740,1180,2370,2780,2420,3350,2350,1230,3410,2600,1200,1740,1400,1220 +86,1150,860,890,1170,500,520,590,1560,1210,1810,450,2130,1230,280,490,3620,1490,660,1680,540,390,3730,3510,2170,2140,630,1600,2230,1770,2040,1190,1620,1630,1260,1780,1220,2410,2830,2550,3390,2410,1280,3440,2660,1240,1790,1450,1250 +87,1200,930,960,1210,520,540,630,1660,1270,1870,460,2180,1300,300,510,3670,1550,700,1710,560,400,3750,3540,2220,2210,650,1630,2280,1820,2080,1210,1690,1680,1320,1830,1260,2460,2890,2700,3430,2490,1330,3470,2700,1280,1850,1490,1280 +88,1260,1010,1040,1260,550,570,660,1770,1340,1930,480,2240,1380,310,540,3670,1610,740,1740,590,420,3770,3560,2270,2280,690,1670,2320,1880,2130,1240,1760,1750,1380,1890,1320,2520,2950,2880,3470,2570,1380,3510,2750,1330,1920,1540,1310 +89,1340,1100,1140,1320,570,600,710,1870,1420,2010,500,2300,1460,330,570,3670,1680,780,1760,610,430,3790,3590,2340,2360,720,1720,2380,1930,2190,1260,1840,1820,1450,1950,1370,2580,3020,3080,3520,2640,1440,3540,2810,1380,2000,1590,1350 +90,1420,1220,1260,1390,610,630,760,1980,1510,2110,520,2370,1570,350,620,3670,1760,840,1800,640,460,3810,3610,2400,2430,760,1770,2440,2000,2250,1290,1920,1910,1530,2010,1440,2630,3090,3350,3560,2710,1510,3580,2870,1420,2090,1650,1390 
+91,1530,1360,1390,1460,650,670,820,2100,1620,2220,550,2440,1700,380,680,3670,1850,900,1880,670,480,3830,3650,2470,2510,800,1830,2510,2070,2310,1320,2000,2000,1620,2090,1510,2700,3160,3750,3600,2800,1590,3610,2930,1470,2200,1710,1430 +92,1660,1440,1560,1550,690,720,900,2240,1730,2340,580,2530,1860,410,750,3670,1940,970,2000,710,510,3850,3680,2550,2590,850,1900,2590,2150,2380,1360,2100,2100,1730,2180,1600,2770,3240,3850,3650,2900,1670,3650,3010,1530,2310,1790,1490 +93,1830,1610,1760,1650,750,780,980,2380,1870,2480,610,2630,2060,450,830,3680,2050,1060,2110,760,550,3870,3710,2640,2650,920,1960,2680,2250,2450,1410,2200,2220,1850,2280,1700,2860,3320,3860,3690,2990,1760,3690,3050,1610,2420,1880,1540 +94,2060,1910,1870,1780,820,860,1090,2500,2010,2640,650,2750,2270,500,890,3680,2170,1170,2220,820,590,3890,3750,2740,2740,1000,2060,2780,2360,2530,1460,2320,2360,1990,2400,1820,2950,3400,3870,3730,3100,1870,3730,3140,1720,2560,1990,1620 +95,2350,2300,2030,1930,920,960,1260,2640,2200,2810,700,2880,2520,550,960,3750,2320,1300,2350,900,640,3910,3780,2850,2840,1100,2180,2900,2480,2640,1530,2480,2510,2130,2540,1980,3060,3490,3870,3770,3220,2020,3760,3230,1840,2730,2120,1720 +96,2690,2570,2390,2110,1050,1090,1520,2830,2450,3060,770,3050,2750,580,1010,3800,2480,1470,2540,990,710,3930,3820,2980,2950,1250,2330,3040,2620,2770,1620,2680,2690,2350,2710,2170,3180,3590,3880,3810,3340,2180,3810,3340,2010,2930,2280,1860 +97,3140,2790,2910,2360,1220,1290,1870,3090,2770,3420,850,3250,3260,680,1060,3860,2680,1710,2770,1130,790,3950,3860,3150,3090,1450,2530,3210,2790,2950,1740,2920,2890,2610,2860,2440,3320,3680,3880,3850,3480,2410,3850,3470,2230,3170,2500,2010 +98,3560,3270,3230,2670,1500,1610,2260,3370,3160,3840,990,3470,3460,830,1140,3920,2950,2070,2990,1370,950,3970,3910,3350,3280,1680,2800,3440,3030,3160,1930,3210,3150,2930,3100,2820,3500,3780,3880,3910,3610,2730,3900,3620,2590,3400,2820,2310 +99,3560,3660,3520,3150,1880,2290,2540,3630,3590,3860,1270,3720,3590,1230,1630,3950,3330,2640,3370,1850,1320,3990,3950,3630,3570,2210,3240,3710,3370,3460,2290,3570,3500,3370,3470,3410,3730,3890,3890,3950,3800,3200,3950,3810,3200,3690,3270,2850 diff --git a/cc-multilingual-main/cc_net/cc_net/data/test_stats.json b/cc-multilingual-main/cc_net/cc_net/data/test_stats.json new file mode 100644 index 0000000000000000000000000000000000000000..3db2a1ee75567508e20c73e912bd953a70789f3f --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/data/test_stats.json @@ -0,0 +1,38 @@ +{ + "2019-09/de_head_0000.json.gz": { + "size": 5264993, + "checksum": "fc12ba3dc982ef06e7e44a916f298e1c16f9a806" + }, + "2019-09/de_middle_0000.json.gz": { + "size": 9195535, + "checksum": "2369ff0296ab1d924c81083f17ce41f22a10ad69" + }, + "2019-09/de_tail_0000.json.gz": { + "size": 33029074, + "checksum": "18865040a7263242d298958f358f7cb5511114d4" + }, + "2019-09/fr_head_0000.json.gz": { + "size": 4076580, + "checksum": "4eef4017bbbe042fc01c45b5fbcf94de49f5138e" + }, + "2019-09/fr_middle_0000.json.gz": { + "size": 8075095, + "checksum": "fd251a5b924c4aa66a63c375ca3a8fae23b3273b" + }, + "2019-09/fr_tail_0000.json.gz": { + "size": 27248949, + "checksum": "4a8aed38abc6b9d04459e8d424bd47426f063638" + }, + "2019-09/it_head_0000.json.gz": { + "size": 1760696, + "checksum": "e5e50e49b4a5147ea82b385babd5c83f74d2a4ed" + }, + "2019-09/it_middle_0000.json.gz": { + "size": 4461832, + "checksum": "7daab7b7acb93d81e50534196ada4e94947b8224" + }, + "2019-09/it_tail_0000.json.gz": { + "size": 14754298, + "checksum": "1adc018519a598ff162261d7e480ea41d3458768" + } +} \ No newline 
at end of file diff --git a/cc-multilingual-main/cc_net/cc_net/dedup.py b/cc-multilingual-main/cc_net/cc_net/dedup.py new file mode 100644 index 0000000000000000000000000000000000000000..fe0d4275793e2ad6602c60ab22e2381054a7a6aa --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/dedup.py @@ -0,0 +1,478 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Tools to remove duplicate paragraphs across one or several shards. +""" + +import argparse +import gc +import hashlib +import logging +import multiprocessing +import os +import tempfile +import time +from pathlib import Path +from typing import Iterable, List, Optional, Set, Union + +import numpy as np + +from cc_net import jsonql +from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet +from cc_net.jsonql import mem_footprint_gb +from cc_net.text_normalizer import normalize_for_dedup + +BYTE_ORDER = "little" +HASH_SIZE = HASH_TYPE(0).nbytes +DISABLE_MULTI_PROCESSING = False + +FilesOrDir = Union[List[Path], Path] + + +def get_args(): + parser = argparse.ArgumentParser( + description="Read a set of json files and allow to query them", + parents=[jsonql.io_parser()], + ) + + parser.add_argument("--field", type=str, default="raw_content") + parser.add_argument("--output_hashes", type=str) + parser.add_argument("--no_finalize", action="store_false", dest="finalize") + # parser.add_argument("--mem_gb", type=int) + parser.add_argument("--hashes", type=str) + + return vars(parser.parse_args()) + + +def _b2i(b: bytes) -> int: + return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0) + + +def str_hash(s: str) -> int: + h = hashlib.sha1(bytes(s, encoding="utf-8")) + return _b2i(h.digest()) + + +log = logging.getLogger(__name__).info + + +def run_par(processes): + # This is different from multiprocessing.map since it allows for kwargs. + processes = list(processes) + if len(processes) == 1 or DISABLE_MULTI_PROCESSING: + for f, args, kwargs in processes: + f(*args, **kwargs) + return + + log(f"Starting {len(processes)} subprocess") + processes = [ + multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes + ] + for p in processes: + p.start() + for p in processes: + p.join() + failed = 0 + for p in processes: + if p.exitcode != 0: + log(f"Process failed with code {p.exitcode}: {p}") + failed += 1 + assert failed == 0, f"{failed} processes failed..." + + +def split_file(file, n_splits): + for i in range(n_splits): + yield jsonql.SplitFile(file, i, n_splits) + + +def merge(hashes_1, hashes_2, output): + if isinstance(hashes_1, str): + h1 = FlatHashSet() + h1.load(hashes_1) + else: + h1 = hashes_1 + + if isinstance(hashes_2, str): + h2 = FlatHashSet() + h2.load(hashes_2) + else: + h2 = hashes_2 + + h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2)) + dup = h1.__contains__(h2_np) + + # Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to + # h1 with their value. + h1[h2_np] = dup + if output: + h1.dump(output) + return h1 + + +def merge_shard(hash_files, output): + h = FlatHashSet() + h.load(hash_files[0]) + for hash_file in hash_files[1:]: + h = merge(h, hash_file, output=None) + print(f"Merged {hash_file}. 
We now have {len(h)} hashes.") + + h.dump(output) + print(f"Saved {len(h)} hashes to {output}.") + + +def _dump_sentence_hashes(source: Path, output: Path, field: str): + treated = 0 + started = time.time() + with open(output, "wb") as o: + for doc in jsonql.read_jsons(source): + content = doc.get(field) + if not content: + continue + h = compute_hashes(content) + if h is None: + continue + h.tofile(o) + treated += 1 + if treated % 100_000 == 0: + delay = time.time() - started + log( + f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)" + ) + + +def _remove_duplicate_hashes(duplicates, source, output): + batch_size = 100_000 + n_lines, n_lines_kept = 0, 0 + with open(source, "rb") as f, open(output, "wb") as o: + log(f"Opening {source} with mode rb") + log(f"Opening {output} with mode wb") + while True: + hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size) + if hashes.size == 0: + break + + keep = duplicates[hashes] < 1 + kept = keep.sum() + hashes *= keep + hashes.tofile(o) + + n_lines += hashes.size + n_lines_kept += kept + + removed = n_lines - n_lines_kept + selectivity = n_lines_kept / n_lines if n_lines else 0 + log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}") + + +def remove_duplicates_sharded( + files: List[Path], + outputs: List[Path], + hashes_dir: FilesOrDir, + field: str, + group_hashes: int = 1, + tmp_dir: Path = None, + min_len: int = 0, +): + """Remove duplicates in several passes, when all hashes don't fit in RAM. + + Note: The current implementation is not doing a 'perfect' deduplication. + If a hash appear exactly once in each shard of hashes it won't be detected + as a duplicate. This can be fixed if hashes are fully dedup beforehand. + """ + assert len(files) == len(outputs) + + if isinstance(hashes_dir, list): + hashes_files = hashes_dir + else: + hashes_files = sorted( + h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin" + ) + + assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}" + + if len(hashes_files) <= group_hashes: + log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}") + rm_dups = DuplicatesRemover(field, hashes_files) + rm_dups._prepare() + run_par( + (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o)) + for f, o in zip(files, outputs) + ) + return + + log(f"Starting deduplicate_sharded on {files}.") + tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None) + + def tmp_files(i): + return [ + Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin") + for f in files + ] + + last = tmp_files(0) + run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last)) + + if isinstance(hashes_dir, list): + hashes_files = hashes_dir + else: + hashes_files = sorted( + h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin" + ) + for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)): + hashes = FlatHashSet() + for h in group: + hashes.load(h) + log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)") + + intermediates = tmp_files(i + 1) + # Remove hashes in parallel. Since modern OS have "copy-on-write" and + # `hashes` is read-only, we will only have one version of it in RAM. + run_par( + (_remove_duplicate_hashes, (hashes, f, tmp), {}) + for f, tmp in zip(last, intermediates) + ) + # Force hashes to be freed, before we start allocating a new one. 
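+            # Dropping the only reference and triggering a collection frees the current
+            # FlatHashSet buffers right away, before the next group of hash files is loaded.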
+ del hashes + gc.collect() + + for tmp in last: + os.remove(tmp) + last = intermediates + + def finalize(source, dedup_hashes, min_len): + n_chars, n_chars_kept = 0, 0 + with open(dedup_hashes, "rb") as hashes: + for doc in jsonql.read_jsons(source): + content = doc.get(field) + if not content or len(content) < min_len: + continue + sentences = content.split("\n") + doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences)) + chars, kept_chars = finalize_doc(doc, field, doc_hashes) + n_chars += chars + n_chars_kept += kept_chars + yield doc + selectivity = n_chars_kept / n_chars if n_chars else 0 + log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).") + + dedup_hashes = last + run_par( + [ + ( + jsonql.run_pipe, + (finalize,), + dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o), + ) + for h, f, o in zip(dedup_hashes, files, outputs) + ] + ) + + tmp_directory.cleanup() + + +def compute_hashes(content) -> Optional[np.ndarray]: + if not content: + return None + lines = content.split("\n") + # save hashes as bytes but reinterpret them as uint64. + hashes = np.fromiter( + ( + hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[ + :HASH_SIZE + ] + for l in lines + ), + dtype=np.dtype((bytes, HASH_SIZE)), + count=len(lines), + ) + return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape) + + +def finalize_doc(doc, field, hashes=None): + content = doc.get(field) + lines = content.split("\n") + n_chars = len(content) + if "original_nlines" not in doc: + doc["original_nlines"] = doc.get("nlines", len(lines)) + if "original_length" not in doc: + doc["original_length"] = doc.get("length", n_chars) + if hashes is None: + hashes = doc.pop(field + "_hash") + + # Remove duplicates inside doc + seen: Set[int] = set() + original_line_ids = doc.get("line_ids", range(len(hashes))) + line_ids = [] + new_lines = [] + for l, line, h in zip(original_line_ids, lines, hashes): + if h not in seen and h != 0: + line_ids.append(l) + new_lines.append(line) + seen.add(h) + + doc[field] = "\n".join(new_lines) + doc["nlines"] = len(line_ids) + n_chars_kept = len(doc[field]) + doc["length"] = n_chars_kept + doc["line_ids"] = line_ids + return n_chars, n_chars_kept + + +class HashesCollector(jsonql.Transformer): + """ + Collect all hashes found of lines found in the `field` of the source documents. + """ + + parallelisable = False + + def __init__( + self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None + ): + super().__init__() + self.n_lines = 0 + self.field = field + self.output = output + self.hashes = FlatHashSet() if hashes is None else hashes + self.num_hashes_end = 0 + self.num_hashes_start = len(self.hashes) + + def summary(self) -> List[str]: + summ = super().summary() + h = self.num_hashes_end if self.hashes is None else len(self.hashes) + h = (h - self.num_hashes_start) // 1000 + max_mem = mem_footprint_gb() + n = self.n_lines // 1000 + summ.append( + f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM." + ) + return summ + + def do(self, doc: dict) -> None: + doc_hashes = compute_hashes(doc.get(self.field)) + if doc_hashes is None: + return + self.hashes.add(doc_hashes) + self.n_lines += doc_hashes.size + + def close(self): + if self.output and self.hashes: + self.hashes.dump(self.output) + self.log(f"Saved {len(self.hashes)} hashes to {self.output}") + # Save the number of hashes. 
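+            # (summary() falls back to num_hashes_end once self.hashes has been dropped below.)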
+ self.num_hashes_end = len(self.hashes) + # Free up mem even if the transformer is kept somewhere else. + self.hashes = None # type: ignore + + +class DuplicatesRemover(jsonql.Transformer): + """DuplicatesRemover""" + + # The hashes can't be pickled so they will have to be read back from disk. + warn_when_pickling = True + + def __init__(self, field: str, hashes_files: List[Path], collect: bool = False): + """ + Remove duplicates + """ + super().__init__() + self.field = field + self.collect = collect + + self.hashes_files = hashes_files + self.duplicates: Optional[AbstractDedupHashSet] = None + + self.n_lines, self.n_lines_kept = 0, 0 + self.n_chars, self.n_chars_kept = 0, 0 + + def _prepare(self): + if self.duplicates is not None: + return + self.duplicates = FlatHashSet() + + start = time.time() + for h in self.hashes_files: + shard_start = time.time() + self.duplicates.load(str(h)) + delay = time.time() - shard_start + self.log( + f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)" + ) + + delay = time.time() - start + self.log( + f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)" + ) + + def do(self, doc: dict) -> Optional[dict]: + content = doc.get(self.field) + if not content: + return None + doc_hashes = compute_hashes(content) + + assert self.duplicates is not None + seen = ( + self.duplicates.add(doc_hashes) + if self.collect + else self.duplicates[doc_hashes] + ) + keep = seen < True + kept = keep.sum() + if kept == 0: + return None + doc_hashes = doc_hashes * keep + self.n_lines += keep.size + self.n_lines_kept += kept + chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes) + self.n_chars += chars + self.n_chars_kept += kept_chars + return doc + + def summary(self) -> List[str]: + summ = super().summary() + end_time = time.time() + n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed + speed = n_docs / (end_time - self.start_time) + summ.append( + f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]" + ) + selectivity = self.n_lines_kept / self.n_lines if n_lines else 0 + summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).") + + n_chars_kept, n_chars = self.n_chars_kept, self.n_chars + selectivity = n_chars_kept / n_chars if n_chars else 0 + summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).") + return summ + + +def deduplicate( + file: jsonql.ReadableFileLike, field: str = "raw_content" +) -> Iterable[dict]: + """Remove duplicates of the given file (but keep the first occurence).""" + dup_remover = DuplicatesRemover(field, [], collect=True) + return dup_remover.map(jsonql.read_jsons(file)) + + +def deduplicate_two_pass( + file: jsonql.FileDescriptor, field: str = "raw_content" +) -> Iterable[dict]: + """Remove duplicates of the given file (even removing the first occurence). 
+ + This is what is done in the paper, and in mine.py + """ + try: + if isinstance(file, Path): + hash_file: Path = file.with_suffix(".bin") + else: + hash_file = jsonql._tmp(Path("hashes.bin")) + jsonql.run_pipes( + jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file + ) + dup_remover = DuplicatesRemover(field, [hash_file]) + return dup_remover.map(jsonql.read_jsons(file)) + finally: + if hash_file.exists(): + hash_file.unlink() diff --git a/cc-multilingual-main/cc_net/cc_net/execution.py b/cc-multilingual-main/cc_net/cc_net/execution.py new file mode 100644 index 0000000000000000000000000000000000000000..874ca145006b3ae86e86c48225e1fe0a30d12236 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/execution.py @@ -0,0 +1,248 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import functools +import itertools +import logging +import os +import sys +import time +import warnings +from pathlib import Path +from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized + +import submitit +from typing_extensions import Protocol +# import pdb +from concurrent.futures import ThreadPoolExecutor + + +class Executor(Protocol): + def __call__(self, function: Callable[..., str], *args: Iterable) -> None: + ... + + +class SubmititRetryOnTimeout(submitit.helpers.Checkpointable): + def __init__(self, fn: Callable): + self.fn = fn + self.__name__ = fn.__name__ + + def __call__(self, *args, **kwargs): + return self.fn(*args, **kwargs) + + +def get_executor( + name: str, + log_dir: Path, + execution: str, + timeout_hour: float = 1.0, + mem_gb: int = 1, + cpus: int = 1, + task_parallelism: int = -1, + options: dict = {}, +) -> Executor: + + execution_mode = execution.split(",")[0] + options.update( + {kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]} + ) + + if execution_mode == "mp": + warnings.warn("Execution mode 'mp' is deprecated, use 'local'.") + execution_mode = "local" + + cluster = None if execution_mode == "auto" else execution_mode + # use submitit to detect which executor is available + ex = submitit.AutoExecutor(log_dir, cluster=cluster) + ex.parameters['timeout_min'] = int(timeout_hour * 60) + + if ex.cluster == "local": + # LocalExecutor doesn't respect task_parallelism + return functools.partial(custom_map_array, ex, task_parallelism) + if ex.cluster == "debug": + return debug_executor + # pdb.set_trace() + # We are on slurm + if task_parallelism == -1: + task_parallelism = 500 + + ex.update_parameters( + name=name, + timeout_min=int(timeout_hour * 60), + mem_gb=mem_gb, + cpus_per_task=cpus, + slurm_array_parallelism=task_parallelism, + **options, + ) + return functools.partial(map_array_and_wait, ex) + + +def map_array_and_wait( + ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable +): + f_name = function.__name__ + + assert len(args) > 0, f"No arguments passed to {f_name}" + approx_length = _approx_length(*args) + + print(f"Submitting {f_name} in a job array ({approx_length} jobs)") + jobs = ex.map_array(function, *args) + if not jobs: + return + failed_jobs = [] + done = 0 + total = len(jobs) + job_array_id = jobs[0].job_id.split("_")[0] + # pdb.set_trace() + print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).") + for job in submitit.helpers.as_completed(jobs): + done += 1 + e = job.exception() + if not e: + print(f"Finished job {job.job_id} 
({done} / {total}).", job.result()) + continue + + print(f"Failed job {job.job_id} ({done} / {total}):", e) + failed_jobs.append(job) + + if failed_jobs: + n_failures = 10 + message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}" + print(message) + for job in failed_jobs[:n_failures]: + print(f"Failed {job.job_id} -> {job.paths.stderr}") + if len(failed_jobs) > n_failures: + print(f"... ({len(failed_jobs) - n_failures} failed job skipped)") + raise Exception(message) + + +def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None: + logging.getLogger().setLevel(logging.DEBUG) + approx_length = _approx_length(*args) + for i, x in enumerate(zip(*args)): + try: + message = function(*x) + except Exception: + exit(1) + try: + import ipdb as pdb # type: ignore + except ImportError: + import pdb # type: ignore + import traceback + + traceback.print_exc() + print("") + pdb.post_mortem() + sys.exit(1) + if message is not None: + print(message, f"({i + 1} / {approx_length})") + +# def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None: +# logging.getLogger().setLevel(logging.DEBUG) +# approx_length = _approx_length(*args) +# with ThreadPoolExecutor(max_workers=4) as executor: +# futures = [] +# for i, x in enumerate(zip(*args)): +# future = executor.submit(_execute_function, function, x, i + 1, approx_length) +# futures.append(future) +# for future in futures: +# future.result() + +# def _execute_function(function: Callable[..., Optional[str]], args: tuple, index: int, total: int): +# try: +# message = function(*args) +# if message is not None: +# print(message, f"({index} / {total})") +# except Exception: +# # traceback.print_exc() +# sys.exit(1) + +def _approx_length(*args: Iterable): + for a in args: + if isinstance(a, Sized): + return len(a) + return -1 + + +def custom_map_array( + ex: submitit.AutoExecutor, + parallelism: int, + function: Callable[..., Optional[str]], + *args: Iterable, +) -> None: + f_name = function.__name__ + assert len(args) > 0, f"No arguments passed to {f_name}" + + jobs_args = list(zip(*args)) + total = len(jobs_args) + if parallelism < 0: + parallelism = os.cpu_count() or 0 + assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}" + print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}") + enqueued = 0 + done = 0 + running_jobs: List[submitit.Job] = [] + failed_jobs: List[submitit.Job] = [] + + while done < len(jobs_args): + # Try to queue more job if we have some bandwidth. + if enqueued < total and len(running_jobs) < parallelism: + running_jobs.append(ex.submit(function, *jobs_args[enqueued])) + enqueued += 1 + continue + + # Else wait for some job to finish + if not running_jobs: + warnings.warn( + f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}" + ) + break + + job = get_next_job(running_jobs) + running_jobs.remove(job) + done += 1 + e = job.exception() + if not e: + print(f"Finished job {job.job_id} ({done} / {total}).", job.result()) + continue + + print(f"Failed job {job.job_id} ({done} / {total}):", e) + failed_jobs.append(job) + + if failed_jobs: + n_failures = 10 + message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}" + print(message) + for job in failed_jobs[:n_failures]: + print(f"Failed {job.job_id} -> {job.paths.stderr}") + if len(failed_jobs) > n_failures: + print(f"... 
({len(failed_jobs) - n_failures} failed job skipped)") + raise Exception(message) + + +def get_next_job( + jobs: Sequence[submitit.Job], poll_frequency: float = 10 +) -> submitit.Job: + """ + Waits for any of the job to finish and returns it. + + jobs: list of jobs + poll_frequency: frequency in second at which we check job status + """ + start = time.time() + waiting = False + while True: + for job in jobs: + if job.done(): + return job + if not waiting: + job_ids = [j.job_id for j in jobs[:4]] + suffix = "..." if len(jobs) > 4 else "" + print( + f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}" + ) + waiting = True + time.sleep(poll_frequency) diff --git a/cc-multilingual-main/cc_net/cc_net/flat_hash_set.py b/cc-multilingual-main/cc_net/cc_net/flat_hash_set.py new file mode 100644 index 0000000000000000000000000000000000000000..f7529fe9b68d5251d23b5ace495524fda75c1b7b --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/flat_hash_set.py @@ -0,0 +1,247 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import sys +import time +import warnings +from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type + +import numpy as np + +HASH_TYPE: Type[np.uint64] = np.uint64 + +GETPY_WARNING = False + + +class AbstractDedupHashSet(Sized, Iterable[np.uint64]): + """A dict-like that returns `True` for keys that have been added more than once. + + The API is batched and expect np.array as input. This batching grants better + perf when using the C++ implementation. + """ + + dtype: Type[np.uint64] = HASH_TYPE + + def __repr__(self): + implementation = type(self).__name__ + return f"[{implementation}, len: {len(self)}" + + def __len__(self) -> int: + ... + + def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray: + ... + + def __getitem__(self, values) -> np.ndarray: + ... + + def __setitem__(self, keys, values) -> None: + ... + + def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]: + ... + + def keys(self) -> Iterable[np.uint64]: + ... + + def __iter__(self) -> Iterator[np.uint64]: + return iter(self.keys()) + + def add(self, h, contains=None): + """Add the given keys. 
First time a key is added the value is set to 0, + then it's set to one.""" + if not isinstance(h, np.ndarray): + h = np.array(h, dtype=HASH_TYPE) + if contains is None: + contains = self.__contains__(h) + + self.__setitem__(h, contains) + return contains + + def merge(self, keys, values): + contains = self.__contains__(keys) + self.__setitem__(keys, contains | values) + + def dump(self, filename): + return self.dump_np(filename) + + def load(self, filename): + return self.load_np(filename) + + def dump_np(self, filename): + kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)]) + items = np.fromiter(self.items(), dtype=kv_type, count=len(self)) + with open(filename, "wb") as f: + np.save(f, items) + + def load_np(self, filename): + items = np.load(str(filename)) + keys = items["k"].copy() + values = items["v"].copy() + self.merge(keys, values) + + def dump_np2(self, filename): + keys = np.fromiter( + (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self) + ) + with open(filename, "wb") as f: + np.save(f, keys) + + values = np.fromiter( + (v for (k, v) in self.items()), dtype=np.uint8, count=len(self) + ) + with open(str(filename) + ".val", "wb") as f: + np.save(f, values) + + def load_np2(self, filename): + keys = np.load(filename) + values = np.load(str(filename) + ".val") + self.merge(keys, values) + + +class NaiveHashSet(dict, AbstractDedupHashSet): + """Pure python implementation of AbstractDedupHashSet. + + This implementation is quite fast, since Python dict are heavily optimized. + """ + + def __init__(self, iterable=None): + super().__init__() + global GETPY_WARNING + if GETPY_WARNING: + warnings.warn( + "Module 'getpy' not found. Deduplication will take more RAM." + " Try `pip install cc_net[getpy]" + ) + GETPY_WARNING = False + + def __contains__(self, values): + """Returns `True` if the object has been added at list once.""" + contains_point = super().__contains__ + return np.fromiter( + map(contains_point, values), count=len(values), dtype=np.uint8 + ) + + def __getitem__(self, values): + """Returns `True` if the object has been added at list twice.""" + get_point = super().get + return np.fromiter( + map(lambda x: get_point(x, False), values), + count=len(values), + dtype=np.uint8, + ) + + def __setitem__(self, keys, values): + assert len(keys) == len(values) + for k, v in zip(keys, values): + dict.__setitem__(self, k, v) + + +try: + import getpy as gp # type: ignore + + class _FlatHashSet(gp.Dict, AbstractDedupHashSet): + """C++ backed implementation of AbstractDedupHashSet. + + This implementation is slightly slower than the Python one but uses + 3x less RAM. + See https://github.com/atom-moyer/getpy. 
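+        Values are uint8 flags, following the same convention as NaiveHashSet:
+        0 after the first insertion, 1 once the hash has been seen again.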
+ """ + + def __init__(self): + super().__init__(HASH_TYPE, np.uint8, default_value=False) + + def __contains__(self, h): + """Returns `True` if the object has been added at list once.""" + if not isinstance(h, np.ndarray): + h = np.array(h, dtype=HASH_TYPE) + c = gp.Dict.__contains__(self, h) + c.dtype = np.uint8 + return c + + def dump(self, filename): + return self.dump_gp(filename) + + def load(self, filename): + return self.load_gp(filename) + + def dump_gp(self, filename): + return gp.Dict.dump(self, str(filename)) + + def load_gp(self, filename): + """Override gp.Dict.load, to correctly merge values instead of overwriting.""" + other = gp.Dict(HASH_TYPE, np.uint8, default_value=False) + other.load(str(filename)) + n = len(other) + keys = np.fromiter( + (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n + ) + values = np.fromiter( + (v for (k, v) in other.items()), dtype=np.uint8, count=n + ) + self.merge(keys, values) + + FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet +except ImportError: + GETPY_WARNING = True + FlatHashSet = NaiveHashSet + + +def timeit(message, function, *args): + start = time.time() + function(*args) + end = time.time() + print(message, f"took {end - start:.0f}s") + + +def compare_load(*filenames): + assert filenames, "No file given" + + def load_list(): + hashes = [] + for f in filenames: + h = FlatHashSet() + h.load(f) + print(f"Loaded {h} from {f}.") + hashes.append(h) + return hashes + + def load_all(load, ext): + hashes = FlatHashSet() + for f in filenames: + load(hashes, f + ext) + + def dump_all(hashes, dump, ext): + for h, f in zip(hashes, filenames): + dump(h, f + ext) + + hashes = load_list() + dump_gp = getattr(FlatHashSet, "dump_gp") + if dump_gp is not None: + timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test") + timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test") + timeit( + "Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test" + ) + + load_gp = getattr(FlatHashSet, "load_gp") + if load_gp is not None: + timeit("Loading using gp.load", load_all, load_gp, ".gp.test") + timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test") + timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test") + + # Loading 10 shards: + # [dedup] Dumping using gp.dump took 52s + # [dedup] Dumping using dump_np took 270s + # [dedup] Dumping using dump_np2 took 483s + # + # [dedup] Loading using gp.load took 654s + # [dedup] Loading using load_np took 82s + # [dedup] Loading using load_np2 took 76s + + +if __name__ == "__main__": + compare_load(*sys.argv[1:]) diff --git a/cc-multilingual-main/cc_net/cc_net/get_wiki_cirrus.py b/cc-multilingual-main/cc_net/cc_net/get_wiki_cirrus.py new file mode 100644 index 0000000000000000000000000000000000000000..a3e1d43ff8773a6e15a4e290cd37597178eea42d --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/get_wiki_cirrus.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Creates mono-lingual corpus from Wikipedia. 
+""" + +import functools +import re +import subprocess +import urllib.request +from pathlib import Path +from typing import Dict + +import func_argparse +from bs4 import BeautifulSoup # type: ignore + +from cc_net import jsonql, text_normalizer + +CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch" +CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz") + + +def tmp(file: Path) -> Path: + return file.parent / ("tmp." + file.name) + + +def opening(file: Path, output: Path = None, n_docs: int = 1_000_000): + """Will dump the tokenized opening text of the given Wikipedia. + + Args: + - file: File containing the Wikipedia dump. + - output: Output file. + - n_docs: How many docs to parse + - tokenize: whether to tokenize the text + - lang: Language code used to chose the tokenizer + """ + assert file.exists() + return jsonql.run_pipes( + functools.partial(extract_opening_text, n_docs=n_docs), + file=file, + output=tmp(output) if output else None, + ) + if output: + tmp(output).replace(output) + + +def extract_opening_text(source, n_docs: int = 10_000): + i = 0 + for doc in jsonql.read_jsons(source): + if not doc: + continue + + text = doc.get("opening_text") + if not text: + continue + + yield text_normalizer.normalize(text) + i += 1 + if i >= n_docs: + break + + +def dl(lang: str, output_dir: Path, date: str = None): + """Download the cirrus extract for the given lang. + + See https://dumps.wikimedia.org/other/cirrussearch for the full list of files. + + Args: + - lang: The Wikipedia code for the language. + - output_dir: Output directory. File will be `{lang}.json.gz` + - date: Date of a specific Cirrus dump. + """ + + urls = get_cirrus_urls(date) + assert ( + lang in urls + ), f"--lang {lang} not found. Available languages are: {urls.keys()}" + + assert output_dir, "--output_dir folder needed." + output_dir.mkdir(exist_ok=True) + output = output_dir / (lang + ".json.gz") + print(f"Downloading {lang} wiki from {urls[lang]} to {output}") + wget(urls[lang], output) + + +def get_cirrus_urls(date: str = None) -> Dict[str, str]: + if date is None: + cirrus_page = BeautifulSoup( + urllib.request.urlopen(CIRRUS_URL), features="html.parser" + ) + dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")] + dumps.remove("..") + dumps.remove("current") + # We take the oldest dump since the most recent might be incomplete. + # The page only link to the N latest dumps so the dump won't be too old. 
+ date = min(dumps) + + cirrus_url = "/".join((CIRRUS_URL, date)) + print("Will use the Wikipedia dump from:", date, cirrus_url) + cirrus_page = BeautifulSoup( + urllib.request.urlopen(cirrus_url), features="html.parser" + ) + urls = {} + for link in cirrus_page.findAll("a"): + match = CIRRUS_DUMP_RE.match(link.get("href")) + if not match: + continue + + urls[match.group(1)] = "/".join([cirrus_url, link.get("href")]) + assert urls, f"No valid download urls found at {cirrus_url}" + return urls + + +def wget(url: str, output: Path): + subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True) + tmp(output).replace(output) + assert ( + output.stat().st_size > 10_000 + ), f"File {output} downloaded from {url} looks too small" + + +if __name__ == "__main__": + func_argparse.main(dl, opening) diff --git a/cc-multilingual-main/cc_net/cc_net/jsonql.py b/cc-multilingual-main/cc_net/cc_net/jsonql.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ab405a2af88c56874adef04bd790859640421a --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/jsonql.py @@ -0,0 +1,1340 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Manipulate files containing one json per line. +""" +import argparse +import collections +import contextlib +import functools +import glob +import gzip +import importlib +import inspect +import io +import itertools +import json +import logging +import multiprocessing +import os +import re +import sys +import tempfile +import time +import typing as tp +import warnings +import zlib +from pathlib import Path +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + TextIO, + Tuple, + Union, +) + +import numpy as np +import psutil # type: ignore +import requests +from typing_extensions import Protocol + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s", + datefmt="%Y-%m-%d %H:%M", +) + +NEWLINE = " N3WL1N3 " + +FilterFn = Callable[[dict], bool] +FileDescriptor = Union[Path, List[Path], str] +WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None] +ReadableFileLike = Union[Iterable[str], FileDescriptor, None] + + +def io_parser(): + """Parser shared by all commands to get input/output files.""" + parser = argparse.ArgumentParser(add_help=False) + file_help = """File to read from. Can be specified several times for several files. + Be careful that bash will expand glob patterns **before** sending the args + to python. 
To use globs put it inside single quotes: + jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1 + jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1 + [Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1 + [Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1 + """ + parser.add_argument("-f", "--file", type=Path, action="append", help=file_help) + parser.add_argument("-o", "--output", type=Path, default="-") + parser.add_argument("--processes", type=int, default=1) + return parser + + +def get_parser(): + parser = argparse.ArgumentParser( + description="Read a set of json files and allow to query them" + ) + subparsers = parser.add_subparsers() + + def add_subparser(function, arguments): + doc = function.__doc__.split("\n")[0] + p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()]) + p.set_defaults(command=function) + for k, v in arguments.items(): + p.add_argument(k, **v) + + add_subparser( + select, + { + "columns": dict(nargs="+", help="Extract the value of the given fields"), + "--skip_empty": dict( + action="store_true", help="Skip lines without the requested fields" + ), + "--separator": dict( + default="\t", help="Separator to use between the different columns" + ), + "--newline": dict( + default=NEWLINE, + help="Replace newlines found in the text by the given string", + ), + }, + ) + + add_subparser( + where, + { + "clauses": dict(nargs="+", help=""), + "--requires": dict( + action="append", help="Python module required by the clauses code." + ), + }, + ) + + add_subparser( + merge, + { + "columns": dict(nargs="+", help=""), + "--separator": dict( + default="\t", help="Separator to use between the different columns" + ), + "--newline": dict( + default=NEWLINE, help="Replace the given string by actual newlines" + ), + }, + ) + + add_subparser( + describe, + { + "columns": dict(nargs="*", help=""), + "--bins": dict( + default="auto", help="Number of bins for computing the histograms" + ), + "--cumulative": dict( + action="store_true", help="Compute cumulative histograms" + ), + "--weights": dict(type=str, help="Column used to weight histograms"), + }, + ) + + add_subparser(split, {"--pattern": dict(type=str)}) + add_subparser(shard, {}) + return parser + + +def _split_array(array, sep): + last = 0 + for i, x in enumerate(array): + if x != sep: + continue + yield array[last:i] + last = i + 1 + if last != len(array): + yield array[last:] + + +def main(raw_args): + parser = get_parser() + pipeline = [] + file = "-" + output = "-" + processes = 1 + + for args_group in _split_array(raw_args, "--"): + args = vars(parser.parse_args(args_group)) + command = args.pop("command") + file = args.pop("file") or file + output = args.pop("output") or output + processes = args.pop("processes") or processes + pipeline.append(as_pipe(command, args)) + + if not pipeline: + parser.print_help() + return + + run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes) + + +class Transformer: + """ + Wrapper around functions transforming documents. + + This allows `run_pipes` to automatically parallelize the pipeline. + Provides: + * Automatic logging. Logging can be changed with the `summary` method. + Loggin frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable. + * Automatic parallelization without pickling. The transformers are shared + across processes, and the object is usually not pickled. + * Basic pickling / unpickling in case it's still needed. 
+ By default will only pickle the arguments passed to the constructor. + * Delayed initialization. Internal state which is not pickable should be set + inside the `_prepare` function. + """ + + parallelisable: bool = True + expect_json: bool = False + warn_when_pickling: bool = False + ready: bool = False + + def __init_subclass__(cls, expect_json: bool = None): + """Detects if the subclass expects json as input.""" + spec = inspect.getfullargspec(cls.do) + if expect_json is None: + expect_json = spec.annotations.get(spec.args[1], None) == dict + + cls.expect_json = expect_json + + def __new__(cls, *args, **kwargs): + """Creates the transformer and save the arguments passed to the constructor.""" + t = super().__new__(cls) + Transformer.__init__(t, args, kwargs) + return t + + def __init__(self, state_args: tuple = None, state_kwargs: dict = None): + """ + Init the transformer counters. + + If state_args/state_kwargs are set they will override whatever was + originally passed to the subclass constructor. + """ + if state_args is not None: + self.__args = state_args + if state_kwargs is not None: + self.__kwargs = state_kwargs + + self.start_time = time.time() + self.__last_log = self.start_time + self.processed = 0 + # Log every 5 min unless specified other wise. + self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60)) + self.__cls = type(self) + self._logger = logging.getLogger(self.__cls.__name__) + + def __call__(self, x): + assert self.ready, f"{self} is not ready." + if x is None: + return + y = self.do(x) + self.processed += 1 + if time.time() - self.__last_log > self._log_freq: + self.log_summary() + return y + + def do(self, x): + raise NotImplementedError(f"'do' not implemented in {type(self)}") + + def summary(self) -> List[str]: + return [self.speed_summary()] + + def speed_summary(self) -> str: + delay = time.time() - self.start_time + h = delay / 3600 + s = self.processed / delay + return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)." + + def log(self, message): + self._logger.info(message) + + def log_summary(self) -> None: + if not self.ready: + self.log("Not ready.") + return + summ = self.summary() or [] + for line in summ: + self.log(line) + self.__last_log = time.time() + + def map(self, source: Iterable) -> Iterator: + if self.ready: + for x in source: + yield self(x) + # since we have been prepared by caller, + # caller is also responsible for calling `close`. + return + else: + with self: + for x in source: + yield self(x) + + def __getstate__(self) -> Tuple[tuple, dict, bool]: + return (self.__args, self.__kwargs, self.expect_json) + + def __setstate__(self, state: Tuple[tuple, dict, bool]): + if self.warn_when_pickling: + warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.") + (args, kwargs, expect_json) = state + # When unpickling `__new__` isn't called so we have to doit ourselves. + Transformer.__init__(self, state_args=args, state_kwargs=kwargs) + type(self).__init__(self, *args, **kwargs) + assert self.expect_json == expect_json + # __setstate__ is called by multiprocessing right before calling + # the object so we need to initialize everything. + self.__enter__() + + def _prepare(self) -> None: + pass + + def __enter__(self) -> "Transformer": + # In multiprocessing __enter__ is always called twice, so we are idempotent. + # Because we call __enter__ when deserializing this transformer and + # also when the parent transformer is deserialized. 
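+        # Resetting start_time here means speed_summary() measures from the (re)start of
+        # processing rather than from construction.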
+ self.start_time = time.time() + if self.ready: + return self + self._prepare() + self.ready = True + return self + + def __exit__(self, *args) -> None: + self.close() + self.log_summary() + + def close(self) -> None: + pass + + +def as_pipe(transformer, kwargs): + if isinstance(transformer, type): + return transformer(**kwargs) + return lambda source: transformer(source, **kwargs) + + +def compose(fns: List[Transformer]) -> Transformer: + if len(fns) == 1: + return fns[0] + return MultiTransformer(fns) + + +class MultiTransformer(Transformer): + def __init__(self, transformers: List[Transformer]): + super().__init__() + self.transformers = transformers + + def __repr__(self) -> str: + pipeline = " | ".join(type(t).__name__ for t in self.transformers) + return f"<{pipeline}>" + + def do(self, x): + for t in self.transformers: + x = t(x) + return x + + def _prepare(self): + for t in self.transformers: + t.__enter__() + return self + + def __exit__(self, *args): + for t in self.transformers: + t.__exit__(*args) + + def summary(self): + return itertools.chain(*(t.summary() for t in self.transformers)) + + +class Mapper(Transformer): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def do(self, x): + return self.fn(x) + + +def run_pipe( + command, + kwargs: dict = None, + file: ReadableFileLike = None, + output: WritableFileLike = None, +): + kwargs = kwargs or {} + if isinstance(kwargs, argparse.ArgumentParser): + kwargs = vars(kwargs.parse_args()) + file = file or Path(kwargs.pop("file", "-")) + output = output or Path(kwargs.pop("output", "-")) + + return run_pipes(as_pipe(command, kwargs), file=file, output=output) + + +def run_pipes( + *fns: Union[Transformer, Callable[[Iterable], Iterable]], + inputs: Iterable[dict] = None, + file: ReadableFileLike = None, + output: WritableFileLike = None, + processes: int = 1, + chunksize: int = 10_000, +): + """ + Run full document processing pipeline. + + - fns: list of functions to run over the documents. Can be: + * `Iterable -> Iterable` function + * jsonql.Transformer instance + Using transformers allow the pipeline to process documents in parallel. + - inputs: iterable to read the documents from + - file: if inputs is not given, will read documents from this file. + - output: writable file like. + - processes: number of processes to use. -1 means all CPU available. 
+ - chunksize: chunksize for multiprocessing.Pool.imap_unordered + """ + expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json + if expect_json and inputs is None: + fns = (JsonReader(),) + fns + transformers = [] + for t in fns: + if not isinstance(t, Transformer): + break + if not t.parallelisable: + break + transformers.append(t) + pipes = fns[len(transformers) :] + + log = logging.getLogger(__name__).info + if inputs is None: + data: Iterable = open_read(file) + else: + data = inputs + + if processes == -1: + processes = os.cpu_count() or 0 + + with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack: + if transformers: + log(f"preparing {transformers}") + transform = stack.enter_context(compose(transformers)) + if processes <= 1: + data = transform.map(data) + else: + p = multiprocessing.current_process() + log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}") + pool = stack.enter_context( + multiprocessing.Pool( + processes=processes, + initializer=_set_global_transformer, + initargs=(transform,), + ) + ) + data = pool.imap_unordered( + _global_transformer, data, chunksize=chunksize + ) + + for fn in pipes: + if isinstance(fn, Transformer): + data = fn.map(data) + else: + data = fn(data) + + write_jsons(data, output) + + +# Allows to share transformer acroos subprocess. +# Used by `run_pipes` +_GLOBAL_TRANSFORMER: Optional[Transformer] = None + + +def _set_global_transformer(transformer: Transformer): + global _GLOBAL_TRANSFORMER + p = multiprocessing.current_process() + logging.info( + f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}" + ) + assert transformer.ready, f"{transformer} isn't ready" + _GLOBAL_TRANSFORMER = transformer + + +def _global_transformer(document: str) -> Optional[dict]: + assert _GLOBAL_TRANSFORMER is not None + return _GLOBAL_TRANSFORMER(document) + + +def lines(file: ReadableFileLike) -> Iterator[str]: + return (line.strip("\n") for line in open_read(file)) + + +def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]: + reader = JsonReader(strict=strict) + lines = open_read(file) + for line in lines: + if line is None: + continue + yield reader(line) + + reader.log_summary() + + +def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None: + eol = os.linesep + with open_write(file) as o: + for res in source: + if res is None: + continue + if isinstance(res, dict): + json.dump(res, o, ensure_ascii=False) + o.write(eol) + continue + if isinstance(res, str): + res = res.rstrip("\n") + print(res, file=o) + + +class JsonReader(Transformer): + def __init__(self, strict: bool = False): + super().__init__() + self.ready = True + self.strict = strict + self.num_errors = 0 + + def do(self, line: str) -> Optional[dict]: + if line is None: + return None + if isinstance(line, dict): + return line + line = line.rstrip("\n") + if not line: + return None + try: + return json.loads(line) + except json.decoder.JSONDecodeError as e: + self.log_error(e) + if self.strict: + raise + return None + + def log_error(self, e: json.decoder.JSONDecodeError): + self.num_errors += 1 + if self.num_errors > 10: + return + + MAX_LEN = 80 + snippet, snippet_len = e.doc, len(e.doc) + col = e.pos + if snippet_len > MAX_LEN: + if col < MAX_LEN: + start = 0 + elif snippet_len - col < MAX_LEN: + start = snippet_len - MAX_LEN + else: + start = col - MAX_LEN // 2 + snippet = e.doc[start : start + MAX_LEN] + col = col - start + logging.warning( + "\n".join( + [ + f"Invalid json 
(length={len(e.doc)}) {e}", + snippet, + " " * (col - 1) + "^", + ] + ) + ) + + def summary(self): + summ = super().summary() + if self.num_errors > 0: + summ.append(f"Skipped {self.num_errors} invalid json.") + return summ + + +def compile_column(column, newline): + if callable(column): + return column + + if column == "*": + return json.dumps + + if re.match(r"[_a-z][_a-z0-9]*", column): + + def extract_col(doc): + v = doc.get(column, "") + if isinstance(v, str) and newline != "\n": + v = v.rstrip("\n").replace("\n", newline) + return v + + return extract_col + + return compile_expr(column) + + +def select(lines, columns, skip_empty=False, separator="\t", newline="\n"): + """Yields the content of the requested columns.""" + column_parsers = [compile_column(c, newline) for c in columns] + for doc in read_jsons(lines): + values = [] + empty = True + for parse_col in column_parsers: + v = parse_col(doc) + values.append(str(v) or "") + empty = empty and v is None + + if skip_empty and empty: + continue + + yield separator.join(values) + + +def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None): + if not isinstance(clause, str): + return clause + + args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})" + args_list = list(re.findall(args_re, clause)) + if not args_list: + # This is only a warning because you may want to have eg random sampling + # that doesn't depend on the document. + logging.warn( + f"Warning: No variable found in expression: <{clause}>\n" + "Variables should be written inside braces, eg: {language}=='en'" + ) + python_like = re.sub(args_re, r"doc.get('\1', None)", clause) + requires = requires or [] + modules = {r: importlib.import_module(r) for r in requires} + return eval(f"lambda doc: {python_like}", modules) + + +class where(Transformer): + """Filters the data using python code. + + Ex: `jsonql where 'len({text}) > 100'` + """ + + def __init__( + self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = [] + ): + super().__init__() + self.raw_clauses = clauses + self.requires = requires + self.n_selected = 0 + self.clauses: List[FilterFn] = [] + + def _prepare(self): + self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses] + + def do(self, doc: dict) -> Optional[dict]: + assert self.clauses + if not doc or not all((c(doc) for c in self.clauses)): + return None + self.n_selected += 1 + return doc + + def summary(self): + n_selected, n_docs = self.n_selected, self.processed + selectivity = n_selected / n_docs if n_docs else 0 + return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"] + + +def merge(lines, columns, separator="\t", newline=NEWLINE): + """Reads tab separated columns and output a json using the given headers. + + Headers are of form {key}[%{type}] + {type} can be one of {"f": float, "i": int, "b": bool, "s": string}. + Default type is string. + A special header "_" means interpret this column as json, and append all other + columns to it. Must appear only once and on last position. 
+ + Ex: + `echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}` + `echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}` + `echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}` + """ + handle_newlines = lambda s: s.replace(newline, "\n") + type_mapping: Dict[str, Callable] = { + "f": float, + "i": int, + "b": bool, + "s": handle_newlines, + } + type_parsing = [ + type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns + ] + columns = [f.split("%")[0] for f in columns] + doc_index = columns.index("_") if "_" in columns else -1 + read_json = JsonReader() + + def parse(line): + parts = line.split(separator, len(columns) - 1) + doc: Dict[str, tp.Any] = {} + for i, value in enumerate(parts): + if columns[i] == "_": + doc.update(read_json(parts[doc_index])) + else: + try: + doc[columns[i]] = type_parsing[i](value) + except ValueError: + logging.error( + f"Error when parsing column {i} of line: {line[:100]}..." + ) + return doc + + for line in lines: + yield json.dumps(parse(line)) + + +class split(Transformer): + """Split a files in several smaller files based on the value of a field.""" + + # Not parallelisable since we are writing to files. + parallelisable = False + + def __init__( + self, + pattern: Union[Path, str] = None, + split_fn: Callable[[dict], str] = None, + mkdir: bool = False, + ): + super().__init__() + assert not ( + pattern and split_fn + ), "split can't have both a pattern and a split_fn" + if split_fn is not None: + self.split_fn = split_fn + else: + assert pattern, "split need either a pattern or a split_fn" + self.split_fn = self.make_split_fn(str(pattern)) + self.mkdir = mkdir + self.o: dict = {} + + def make_split_fn(self, pattern: str) -> Callable[[dict], str]: + candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern)) + return lambda doc: pattern.format(**{c: doc[c] for c in candidates}) + + def do(self, doc): + filename = self.split_fn(doc) + if not filename: + return + o = self.o.get(filename, None) + if o is None: + if self.mkdir: + Path(filename).parent.mkdir(parents=True, exist_ok=True) + self.o[filename] = open_write(filename) + print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True) + + def summary(self): + summ = super().summary() + summ.append(f"Found {len(self.o)} splits.") + return summ + + def close(self): + for file in self.o.values(): + file.close() + + +def histogram(values, bins, weights): + hist, bins = np.histogram(values, bins=bins) + # n_bins = len(hist) + + if weights is not None: + # Bins can't be auto-determined if weights is supplied. + # So we first compute the bins without the weights then recompute + # the histogram with the weights. 
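+        # `bins` now holds the edge array returned by the unweighted call above,
+        # so both histograms share the same binning.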
+ hist, bins = np.histogram(values, bins=bins, weights=weights) + # cumsum = np.cumsum(hist) + # total = cumsum[-1] + + # for i in range(n_bins - 1): + # if cumsum[i] / total > 0.9: + # useful_range = np.linspace(bins[0], bins[i + 1], n_bins) + # new_bins = np.append(useful_range, [bins[-1]]) + # return np.histogram(values, bins=new_bins, weights=weights) + + return hist, bins + + +def _parse_bins(bins): + try: + if isinstance(bins, str): + if "," in bins: + bins = [int(b) for b in bins.split(",")] + else: + bins = int(bins) + except ValueError: + pass + return bins + + +ALL_DOCUMENTS = "" +MAX_LABEL_LEN = 100 + + +def bar_chart(hist, bins): + n = sum(hist) + max_h = max(hist) + out = [] + for i, h in enumerate(hist): + h_size = 80 * h // max_h + dh_size = 80 * (h - hist[i - 1]) // max_h + if h_size == 0 or dh_size == 0: + continue + bar = "█" * h_size + out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}") + out.append(f"{bins[-1]:8.3f}") + return out + + +def display_stats(stats, key, weights=None, bins="auto", cumulative=False): + out = [] + documents = stats[ALL_DOCUMENTS] + count = stats.get(key, 0) + r = count / documents if documents else 0 + out.append(f"Field {key} saw {count} times ({r:5.1%})") + + length = stats.get(key + ".length", None) + avg_length = length // count if length else 0 + if length is not None: + out[-1] += f", average length is {length // count}" + + values = stats.get(key + ".val", None) + if values: + out[-1] += f", histogram is: (bins={bins})" + if weights: + if weights not in stats: + logging.warn(f"Warning: weights column {weights} not found.") + if weights + ".val" not in stats: + logging.warn( + f"Warning: weights column {weights} is not a numeric column." + ) + weights = stats.get(weights + ".val") + hist, bins = histogram(values, _parse_bins(bins), weights) + if cumulative: + hist = np.cumsum(hist) + out += bar_chart(hist, bins) + + cnt = stats.get(key + ".cnt", None) + if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1: + cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True) + out[-1] += ", top 100 labels:" + for label, n in cnt[:100]: + if n < 5: + continue + out.append(f"{label:25}: {n:6} ({n / count:5.1%})") + + return out + + +def describe(source, columns=None, weights=None, **kwargs): + """Compute some statistics about a dataset. 
+ + Stats can be restricted to a subset of columns.""" + MAX_HIST_SIZE = 100_000_000 + MAX_CNT_SIZE = 1000 + stats = {ALL_DOCUMENTS: 0} + needed = columns + [weights] if columns else None + + for doc in read_jsons(source): + stats[ALL_DOCUMENTS] += 1 + for k, v in doc.items(): + if needed and k not in needed: + continue + stats[k] = get_or_set(stats, k, 0) + 1 + if isinstance(v, str): + stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v) + if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels + continue + cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int)) + if v in cnt or len(cnt) < MAX_CNT_SIZE: + cnt[v] += 1 + elif type(v) in (int, float): + values = get_or_set(stats, k + ".val", []) + if len(values) < MAX_HIST_SIZE: + values.append(v) + elif type(v) is list and len(v) and type(v[0]) in (int, float): + values = get_or_set(stats, k + ".val", []) + if len(values) < MAX_HIST_SIZE: + values += v + elif type(v) is dict: + cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int)) + for label in v: + if label in cnt or len(cnt) < MAX_CNT_SIZE: + cnt[label] += 1 + + documents = stats[ALL_DOCUMENTS] + yield f"Stats computed on {documents} documents:" + for k in stats: + if columns and k not in columns: + continue + if "." in k or k == ALL_DOCUMENTS: + continue + for line in display_stats(stats, k, weights=weights, **kwargs): + yield line + + +def shard(lines): + """Shard a file in several smaller ones.""" + # The creation of the shard is handle in a generic way. Do we need this ? + return lines + + +# *** Utils *** + + +def get_or_set(dictionary, key, default): + if key not in dictionary: + dictionary[key] = default + return dictionary[key] + + +class SimpleIO(Protocol): + """A subset of methods from TextIO.""" + + def close(self) -> None: + ... + + def write(self, line: str) -> int: + ... + + def __enter__(self) -> "SimpleIO": + ... + + def __exit__(self, exc_type, exc_value, traceback): + ... + + +def open_read(filename: ReadableFileLike) -> Iterable[str]: + """Open the given file, list of files or files matching the given glob and read lines. + + `filename` is None or "-" -> reads from stdin + `filename` is a Path / str -> interprets filename as a glob and open files matching it + `filename` is a list -> opens sequentially all files from the list using `open_read` + `filename` is something else -> returns the object wrapped in a `nullcontext` + This allows to pass already openened files or iterables. + + `open_read` will decompress gzip files, given they have ".gz" suffix. + """ + if filename is None: + return sys.stdin + + if isinstance(filename, list): + assert isinstance(filename[0], Path) + if len(filename) == 0: + return [] + if len(filename) > 1: + return _yield_from(filename) + filename = tp.cast(Path, filename[0]) + if isinstance(filename, str): + if filename.startswith("http://") or filename.startswith("https://"): + return open_remote_file(filename) + + filename = Path(filename) + if not isinstance(filename, Path): + # we might have received an iterable, return it unmodified. 
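+ # (e.g. an already opened file or a generator of lines, as allowed by the docstring)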
+ return filename # type: ignore + + # Expand glob patterns only when reading + files = [Path(f) for f in sorted(glob.glob(str(filename)))] + if len(files) > 1: + return _yield_from(files) + if len(files) == 1: + filename = files[0] + + assert isinstance(filename, Path) + + if filename.name.endswith("]"): + return block_reader(filename) + + logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'") + if filename.suffix == ".gz": + file: TextIO = gzip.open(filename, "rt") # type: ignore + else: + file = open(filename, "rt") + + return _close_when_exhausted(file) + + +def _close_when_exhausted(file: TextIO) -> Iterable[str]: + with file: + yield from file + + +def _yield_from(files: list) -> Iterable[str]: + for file in files: + yield from open_read(file) + + +def open_write( + filename: WritableFileLike, max_size: str = "4G" +) -> tp.ContextManager[TextIO]: + """Open the given file, list of files or files matching the given glob. + + The return value is a ContextManager meant to be used inside a `with` block: + ``` + with open_write("foo.txt") as o: + ... + + Write mode: + replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`. + If filename ends with ".gz", creates a blocked gzip file with random access. + """ + if filename is None: + return contextlib.nullcontext(sys.stdout) + + if isinstance(filename, list): + if len(filename) > 1: + return MultiFile(filename, "w", max_size) + else: + filename = tp.cast(Path, filename[0]) + if isinstance(filename, str): + filename = Path(filename) + if not isinstance(filename, Path): + assert hasattr(filename, "write"), f"{filename} doesn't have a .write method." + # We return a 'TextIO' even though we only check for `.write` method, + # this works better with eg `print`. + return contextlib.nullcontext(tp.cast(TextIO, filename)) + + mode = "wt" + if "?" in filename.name: + return sharded_file(filename, mode, max_size) + + logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}") + # TODO: should we use another format ? + if filename.suffix == ".gz": + return BlockedGzipWriter(Path(filename), mode, block_size="64M") + + return open(filename, "wt") + + +def parse_size(size): + unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3} + unit = size[-1].upper() + assert ( + unit in unit_map + ), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}." + return int(size[:-1]) * unit_map[unit] + + +class MultiFile(SimpleIO): + def __init__(self, files: Iterable[Path], mode="w", max_size="4G"): + self.name = str(files) + self.mode = mode + self.files = iter(files) + self.max_size = parse_size(max_size) + self.current_handle: Optional[TextIO] = None + self.current_block_size = 0 + self._open_next_handle() # Opening 1st handle allows to write directly. + + def write(self, content) -> int: + # Avoid splitting newlines to a new file. 
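+ # A lone "\n" is still written to the current handle even when it is over
+ # max_size, so a document and its trailing newline never land in two shards.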
+ # use current_block_size since it's faster than `tell()` + if content != "\n" and self.current_block_size >= self.max_size: + self._open_next_handle() + if self.current_handle is None: + raise Exception("No more files to write to...") + + written = self.current_handle.write(content) + self.current_block_size += written + return written + + def _open_next_handle(self) -> bool: + self.close() + file = next(self.files, None) + if file is None: + return False + + self.current_handle = open_write(file).__enter__() + self.current_block_size = 0 + return True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def closed(self): + return self.current_handle is None + + def close(self): + if self.current_handle is None: + return + + # log("Closing", self.current_handle.name, "with mode", self.current_handle.mode) + self.current_handle.__exit__(None, None, None) + self.current_handle = None + + +# not sure it helps since connections are reseted anyway. +_session = functools.lru_cache()(requests.Session) + + +def request_get_content(url: str, n_retry: int = 3) -> bytes: + """Retrieve the binary content at url. + + Retry on connection errors. + """ + t0 = time.time() + logging.info(f"Starting download of {url}") + for i in range(1, n_retry + 1): + try: + r = _session().get(url) + r.raise_for_status() + break + except requests.exceptions.RequestException as e: + # Sleep and try again on error, unless it's a 404. + message = e.args[0] if isinstance(e.args[0], str) else "" + if i == n_retry or "Client Error" in message: + raise e + warnings.warn( + f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})" + ) + time.sleep(10 * 2 ** i) + dl_time = time.time() - t0 + dl_speed = len(r.content) / dl_time / 1024 + logging.info( + f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)" + ) + return r.content + + +def open_remote_file(url: str, cache: Path = None) -> Iterable[str]: + """Download the files at the given url to memory and opens it as a file. + Assumes that the file is small, and fetch it when this function is called. + """ + if cache and cache.exists(): + return open_read(cache) + + # TODO: open the remote file in streaming mode. + # The hard part is that we need to write the content on disk at the same time, + # to implement disk caching. + raw_bytes = request_get_content(url) + content = io.BytesIO(raw_bytes) + if url.endswith(".gz"): + f: TextIO = gzip.open(content, mode="rt") # type: ignore + else: + f = io.TextIOWrapper(content) + + if cache and not cache.exists(): + # The file might have been created while downloading/writing. + tmp_cache = _tmp(cache) + tmp_cache.write_bytes(raw_bytes) + if not cache.exists(): + tmp_cache.replace(cache) + else: + tmp_cache.unlink() + + return _close_when_exhausted(f) + + +def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile: + folder, name = file_pattern.parent, file_pattern.name + assert "?" in name, f"Can't expand give file_pattern: {file_pattern}" + + n = name.count("?") + assert 0 < n < 8 + assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}" + assert "r" not in mode + files = (folder / name.replace("?" 
* n, f"%0{n}d" % i) for i in range(10 ** n)) + + return MultiFile(files, mode, max_size) + + +class SplitFile: + def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"): + assert mode == "r" + size = os.path.getsize(filename) + self.handle = open(filename, mode) + start = chunk * size // n_chunks + self.end: int = (chunk + 1) * size // n_chunks + + if start > 0: + self.handle.seek(start - 1) + # Skip incomplete line. This avoid crashing when reading eg the middle + # of a unicode char. `self.handle.buffer` is a binary file reader. + self.handle.buffer.readline() # type: ignore + + def __enter__(self): + return self + + def __iter__(self): + while True: + line = self.handle.readline() + if not line: + return + + yield line + if self.handle.tell() >= self.end: + return + + def readlines(self): + return list(self.__iter__()) + + def close(self): + self.handle.close() + + def __exit__(self, *args): + self.close() + + +def get_block_readers(filename: Path, n_readers, mode="t"): + index_filename = filename.parent / (filename.name + ".index") + if not index_filename.exists(): + return [gzip.open(filename, "r" + mode)] + index: List[int] = np.load(index_filename) + n_chunks = len(index) + chunk_per_reader = int(np.ceil(n_chunks / n_readers)) + n_readers = int(np.ceil(n_chunks / chunk_per_reader)) + + start = 0 + readers = [] + for i in range(n_readers): + end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)] + r = _blocked_gzip_reader(filename, start, end, mode) + readers.append(r) + start = end + return readers + + +def block_reader(filename: Path) -> Iterable[str]: + root, pattern = str(filename)[:-1].split("[", 1) + assert root.endswith(".gz"), "Can only read block of a .gz file for now." + + ii, nn = pattern.strip().split("/") + i, n_readers = int(ii), int(nn) + + index_filename = root + ".index" + assert os.path.exists( + index_filename + ), f"Index {index_filename} not found for {filename}" + index: List[int] = np.load(index_filename) + n_chunks = len(index) + chunk_per_reader = int(np.ceil(n_chunks / n_readers)) + n_readers = int(np.ceil(n_chunks / chunk_per_reader)) + # I'm not sure how to handle the case where there is less reader than expected. + # Currently we return empty readers. + + start = 0 + if i > 0: + start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)] + end = index[min(i * chunk_per_reader, n_chunks - 1)] + return _blocked_gzip_reader(root, start, end, mode="t") + + +def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]: + handle = gzip.open(filename, "r" + mode) + handle.seek(start) + try: + while handle.tell() < end: + line = handle.readline() + if not line: + break + yield line + finally: + handle.close() + + +class BlockedGzipWriter(MultiFile): + """Writes a Gzip files which can be read by block. + + Decreasing the block size may hurt compression, but provides more split points. 
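+
+ On close, a ".index" file holding the byte offset of each block is written
+ next to the archive; `block_reader` uses it to serve reads of the form
+ "file.json.gz[2/8]".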
+ """ + + def __init__(self, filename: Path, mode: str, block_size: str = "256M"): + assert "w" in mode + self.filename = Path(filename) + self.index: List[int] = [] + self.zipfile: Optional[gzip.GzipFile] = None + super().__init__([], mode, block_size) + + def _open_next_handle(self) -> bool: + """Here we never actually close/open handles, + we just write the end of block sequence.""" + if not self.current_handle: + mode = self.mode + "t" + self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode)) + assert isinstance(self.current_handle.buffer, gzip.GzipFile) + self.zipfile = self.current_handle.buffer + return True + + # Use Z_FULL_FLUSH to allow random access: + # https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313 + self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore + self.index.append(self.current_handle.tell()) + self.current_block_size = 0 + return True + + def flush(self): + assert self.current_handle is not None + self.current_handle.flush() + + def close(self): + if self.current_handle is None: + return + self.current_handle.flush() + self.index.append(self.current_handle.tell()) + self.current_handle.close() + self.current_handle = None + index = np.array(self.index, dtype=np.uint64) + with open(str(self.filename) + ".index", "wb") as o: + np.save(o, index) + + +def grouper(iterable, n): + group = [] + for x in iterable: + group.append(x) + if len(group) == n: + yield group + group = [] + if group: + yield group + + +PROCESS = psutil.Process() + + +def mem_footprint_gb(pid=None): + rss = PROCESS.memory_info().rss + return rss / 1_000_000_000 + + +def _tmp(output: Path) -> Path: + suffix = "".join(output.suffixes) + suffix = ".tmp" + suffix + prefix = output.name[: -len(suffix)] + _, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix) + return Path(tmp_path) + + +@functools.lru_cache() +def _tmp_dir() -> Path: + job_id = os.environ.get("SLURM_JOB_ID") + if job_id: + return Path("/scratch/slurm_tmpdir") / job_id + + checkpoint = Path("/checkpoint") / os.environ.get("USER", "") + if checkpoint.exists(): + tmp = checkpoint / "tmp" + tmp.mkdir(exist_ok=True) + return tmp + + return Path("/tmp") + + +if __name__ == "__main__": + multiprocessing.set_start_method("fork") + main(sys.argv[1:]) diff --git a/cc-multilingual-main/cc_net/cc_net/mine.py b/cc-multilingual-main/cc_net/cc_net/mine.py new file mode 100644 index 0000000000000000000000000000000000000000..de180b76a4f8a21041dc1c95f0faa8408ebdea57 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/mine.py @@ -0,0 +1,648 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Main script to download a CC dump, remove duplicates, split by language and +filter the documents. + +The pipeline parameters are described in the `Config` class. 
+""" + +import hashlib +import json +import time +import warnings +from argparse import ArgumentParser +from collections import defaultdict +from itertools import repeat +from pathlib import Path +from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple + +import func_argparse + +# Local scripts +from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file +from cc_net import regroup as regroup_module +from cc_net import split_by_lang +from cc_net.execution import Executor + +# Constant +FILE_DIR = Path(__file__).parent +CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv" + +DEFAULT_PIPELINE = [ + # "dedup", + "lid", + "keep_lang", + "sp", + "lm", + "pp_bucket", + "drop", + "split_by_lang", +] + + +class Config(NamedTuple): + """ + Mine Common Crawl with the given settings. + + config_name + dump: CC dump id + output_dir: working directory + mined_dir: name of the destination folder, full path will be {ouput_dir}/{mined_dir}/{dump_id} + execution: chose how to parallelize the execution + num_shards: number of shards to split the dump + num_segments_per_shard: allow to download a small portion of CC (eg for tests) + min_len: remove documents shorter than this (in chars) + hashes_in_mem: number of shards hashes to use for dedup + lang_whitelist: only treat those languages + lang_blacklist: ignore those languages + lang_threshold: remove docs whose top language score is lower than this + keep_bucket: keep only those perplexity bucket chose from (head, middle, tail, all) + lm_dir: folder containing LMs + lm_languages: only use LMs for the following languages + cutoff: cutoff file to use for split in head/middle/tail + mine_num_processes: number of processes to use for mining + target_size: size of finals files produce during `regroup` stage + cleanup_after_regroup: delete intermediary files after regroup + task_parallelism: max number of task to run in parallel + pipeline: restricts the mining pipeline to the given steps. Order is important ! 
+ experiments: (HACK) enable specific experiments in the code + """ + + config_name: str = "base" + dump: str = "2017-51" + output_dir: Path = Path("data") + mined_dir: str = "mined" + execution: str = "auto" + num_shards: int = 1600 + num_segments_per_shard: int = -1 + metadata: Optional[str] = None + min_len: int = 300 + hash_in_mem: int = 50 + lang_whitelist: Sequence[str] = ['hi'] + lang_blacklist: Sequence[str] = [] + lang_threshold: float = 0.5 + keep_bucket: Sequence[str] = [] + lm_dir: Path = Path("data/lm_sp") + cutoff: Path = CUTOFF_CSV + lm_languages: Optional[Sequence[str]] = None + mine_num_processes: int = 16 + target_size: str = "4G" + cleanup_after_regroup: bool = False + task_parallelism: int = -1 + pipeline: Sequence[str] = DEFAULT_PIPELINE + experiments: Sequence[str] = [] + cache_dir: Optional[Path] = None + + def get_executor( + self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1 + ) -> Executor: + name = "_".join((name, self.config_name, *self.experiments)) + return execution.get_executor( + name, + self.output_dir / "logs", + self.execution, + timeout_hour=timeout_hour, + mem_gb=mem_gb, + cpus=cpus, + task_parallelism=self.task_parallelism, + ) + + def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader: + dump_cache: Optional[Path] = None + if self.cache_dir: + self.cache_dir.mkdir(exist_ok=True) + dump_cache = self.cache_dir / self.dump + dump_cache.mkdir(exist_ok=True) + + return process_wet_file.CCShardReader( + self.dump, + shard=shard, + num_shards=self.num_shards, + num_segments_per_shard=self.num_segments_per_shard, + min_len=self.min_len, + cache_dir=dump_cache, + ) + + @classmethod + def from_json(cls, json_file: Path) -> "Config": + raw_lines = json_file.read_text().splitlines() + raw_lines = [l for l in raw_lines if not l.strip().startswith("//")] + json_config = json.loads("".join(raw_lines)) + path_keys = ["cache_dir", "lm_dir", "output_dir"] + for key in path_keys: + if key in json_config: + json_config[key] = Path(json_config[key]) + return Config(**json_config) + + @property + def will_split(self) -> bool: + return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline + + def get_lm_languages(self) -> Sequence[str]: + if self.lm_languages is not None: + return self.lm_languages + + if self.lang_whitelist: + return self.lang_whitelist + + languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")] + if self.lang_blacklist: + languages = [l for l in languages if l not in self.lang_blacklist] + return languages + + def get_mined_dir(self, regroup: bool = False) -> Path: + if self.will_split and not regroup: + return self.output_dir / f"{self.mined_dir}_split" / self.dump + return self.output_dir / self.mined_dir / self.dump + + +BASE_CONFIG = Config() + +BYLANG_CONFIG = Config( + config_name="by_lang", + mined_dir="mined_by_lang", + pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"], +) + +REPRODUCE_CONFIG = Config( + config_name="reproduce", + dump="2019-09", + mined_dir="reproduce", + pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"], + metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0", + # Optional filtering: + # It won't change much the execution speed, but decreases the disk requirement. + # Restrict languages + lang_whitelist=["fr"], + # Restrict perplexity buckets + # Top languages have been split in perplexity buckets according + # to a Wikipedia trained LM. 
+ # The buckets from low perplexity (good) to high (bad) are: + # ["head", "middle", "tail"] + # Languages without a LM have only one bucket "all". + # It won't change much the execution speed, but decreases the disk requirement. + keep_bucket=["head", "all"], + mine_num_processes=1, +) + +TEST_CONFIG = BASE_CONFIG._replace( + config_name="test", + dump="2019-09", + output_dir=Path("test_data"), + execution="local", + num_shards=4, + num_segments_per_shard=1, + hash_in_mem=2, + mine_num_processes=2, + lang_whitelist=["de", "it", "fr"], + target_size="32M", + cleanup_after_regroup=False, + cache_dir=Path("test_data/wet_cache"), +) + +PREDEF_CONFIGS = { + "base": BASE_CONFIG, + "by_lang": BYLANG_CONFIG, + "test": TEST_CONFIG, + "test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"), + "debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0), + "reproduce": REPRODUCE_CONFIG, + "augment": BASE_CONFIG._replace( + config_name="augment", dump="2019-13", lang_blacklist=["en"] + ), +} + + +def tmp(output: Path) -> Path: + return output.parent / (output.stem + ".tmp" + output.suffix) + + +def finalize(tmp_output: Path, output: Path) -> None: + if not tmp_output.exists(): + warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.") + return + + tmp_index = tmp_output.parent / (tmp_output.name + ".index") + tmp_output.rename(output) + + if tmp_index.exists(): + tmp_index.rename(output.parent / (output.name + ".index")) + + +def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]: + if n < 0: + n = len(iterable[0]) + columns: tuple = tuple([] for _ in range(n)) + for row in iterable: + assert len(row) == n, f"Found tuple of len({len(row)}, expected {n}: {row}" + for i in range(n): + columns[i].append(row[i]) + + return columns + + +def hashes(conf: Config) -> List[Path]: + """Computes hashes for each shard.""" + + hashes_dir = conf.output_dir / "hashes" / conf.dump + outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)] + missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()] + + if not missing_outputs: + return outputs + + hashes_dir.mkdir(parents=True, exist_ok=True) + # With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for + # overhead due to how the dynamic allocation works. + ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2) + ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs)) + + # Wait a bit so that files appears on the disk. 
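+ # (the executor can return before a shared filesystem makes the outputs
+ # visible, which would otherwise trip the assert below)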
+ time.sleep(20) + assert all(o.exists() for o in outputs) + return outputs + + +def _hashes_shard(conf: Config, shard: int, output: Path): + tmp_output = tmp(output) + jsonql.run_pipes( + dedup.HashesCollector(field="raw_content", output=tmp_output), + inputs=conf.get_cc_shard(shard), + ) + finalize(tmp_output, output) + return f"Hashed {output}" + + +HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400] + + +def mine(conf: Config) -> List[Path]: + """Remove dups, run LID and LMs, and split by lang and quality.""" + mined_dir = conf.get_mined_dir() + if conf.will_split: + # Give a directories when splitting + outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)] + else: + # Files otherwise + outputs = [ + mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards) + ] + + if "mini_again" in conf.experiments: + mined_dir = conf.output_dir / "mini_again" / conf.dump + outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)] + + # TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs + mem_gb = 60 + 1 * conf.hash_in_mem + timeout_hour = 5 + if "hashes" in conf.experiments: + # HACK: used for generating paper figures + outputs = [ + conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz" + for h in HASHES_IN_MEM + ] + mem_gb = int(max(HASHES_IN_MEM) * 1.2) + timeout_hour = 8 + + missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()] + + if "mini_again" in conf.experiments: + missing_outputs = [ + (shard, o) + for shard, o in enumerate(outputs) + if shard in [5, 139] and not o.exists() + ] + + if not missing_outputs: + return outputs + + mined_dir.mkdir(parents=True, exist_ok=True) + ex = conf.get_executor( + f"mine_{conf.dump}", + mem_gb=mem_gb, + timeout_hour=timeout_hour, + cpus=conf.mine_num_processes + 1, + ) + + # Compute hashes firsts. 
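+ # Hashes are only needed when "dedup" is part of the pipeline; otherwise each
+ # shard receives an empty hash list and deduplication is effectively skipped.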
+ if "dedup" in conf.pipeline: + hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem)) + hashes_files: Iterable[List[Path]] = [ + hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs + ] + else: + hashes_files = repeat([]) + + ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs)) + + assert all(o.exists() for o in outputs) + return outputs + + +def _get_segment(tmp_output: Path, doc: dict) -> str: + segment: str = doc["cc_segment"].split("/")[-1] + return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz")) + + +def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str: + assert conf.pipeline + tmp_output = tmp(output) + if "hashes" in conf.experiments: + # HACK: used for generating paper figures + hashes_in_mem = shard + hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]] + shard = 0 + cc_shard = conf.get_cc_shard(shard) + + steps: Dict[str, Optional[jsonql.Transformer]] = {} + lang_id = Path("bin") / "lid.bin" + steps["lid_before_dedup"] = split_by_lang.Classifier( + model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5 + ) + steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes) + + steps["lid"] = split_by_lang.Classifier( + model=lang_id, + field="raw_content", + out_field="language", + top=1, + threshold=conf.lang_threshold, + ) + steps["lid_after_dedup"] = split_by_lang.Classifier( + model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5 + ) + + if conf.lang_blacklist: + steps["keep_lang"] = jsonql.where( + [lambda doc: doc.get("language") not in set(conf.lang_blacklist)] + ) + elif conf.lang_whitelist: + steps["keep_lang"] = jsonql.where( + [lambda doc: doc.get("language") in set(conf.lang_whitelist)] + ) + else: + steps["keep_lang"] = None + + tok_field = "tokenized" + steps["sp"] = perplexity.MultiSentencePiece( + {l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()}, + field="raw_content", + output_field=tok_field, + normalize=True, + ) + steps["lm"] = perplexity.DocLM( + {l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()}, + field=tok_field, + output_field="perplexity", + normalize=False, # Normalization is done before SentencePiece + # load_method=kenlm.LoadMethod.PARALLEL_READ, + ) + steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV) + steps["drop"] = perplexity.DropKeys(tok_field) + + steps["keep_bucket"] = None + if conf.keep_bucket: + steps["keep_bucket"] = jsonql.where( + [lambda doc: doc.get("bucket", "all") in conf.keep_bucket] + ) + + if "fetch_metadata" in conf.pipeline: + # TODO: better default + assert conf.metadata is not None + steps["fetch_metadata"] = minify.MetadataFetcher( + f"{conf.metadata}/{conf.dump}/" + ) + + steps["minify"] = minify.Minifier() + + pattern = str(tmp_output / "{language}_{bucket}.json.gz") + steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True) + + steps["split_by_segment"] = jsonql.split( + split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True + ) + + pipeline = filter(None, (steps[s] for s in conf.pipeline)) + + jsonql.run_pipes( + *pipeline, + inputs=cc_shard, + processes=conf.mine_num_processes, + chunksize=100, + # The splitter takes care of writing to files. 
+ output=tmp_output if not conf.will_split else None, + ) + finalize(tmp_output, output) + return f"Mined {output}" + + +def regroup(conf: Config, all_dirs: List[Path]) -> Path: + """Reshards each language/quality after 'mine'.""" + regroup_dir = conf.get_mined_dir(regroup=True) + assert all_dirs + all_files = [f for d in all_dirs for f in d.glob("*.json.gz")] + if not all_files: + print(f"No .json.gz file found in {all_dirs[0]}") + + splits: Dict[str, List[Path]] = defaultdict(list) + for f in all_files: + split = f.name.split(".")[0] + splits[split].append(f) + + print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.") + inputs: List[List[Path]] = [] + outputs: List[Path] = [] + target_size = jsonql.parse_size(conf.target_size) + for split, files in splits.items(): + cuts = list(regroup_module.determine_groups(files, target_size=target_size)) + if not cuts: + continue + + pattern = f"{split}_????.json.gz" + existing_outputs = sorted(regroup_dir.glob(pattern)) + + if not conf.cleanup_after_regroup: + # We still have all the inputs so it is safe to overwrite existing outputs. + assert len(existing_outputs) <= len(cuts) + existing_outputs = [] + + if len(existing_outputs) > 0 and len(cuts) == 1: + # append to existing file if size allows it. + new_size = ( + sum(f.stat().st_size for f in cuts[0]) + + existing_outputs[-1].stat().st_size + ) + if new_size < target_size: + print(f"Will append {cuts[0]} to {existing_outputs[-1]}") + cuts[0].insert(0, existing_outputs.pop(-1)) + + n_existing = len(existing_outputs) + for i, cut in enumerate(cuts): + # avoid overwriting existing files. + j = i + n_existing + output = regroup_dir / f"{split}_{j:04}.json.gz" + inputs.append(cut) + outputs.append(output) + print( + str(regroup_dir / pattern), + "->", + len(cuts), + f"shards ({n_existing} already there).", + ) + + ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2) + ex(_regroup, repeat(conf), inputs, outputs) + + return regroup_dir + + +def _regroup(conf: Config, inputs: List[Path], output: Path) -> str: + output.parent.mkdir(parents=True, exist_ok=True) + regroup_module.fast_reshard( + inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup + ) + return f"Regrouped {output}" + + +def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path: + """Reshards each language/quality after 'mine'.""" + # check that mining is over. + regroup_dir = conf.get_mined_dir(regroup=True) + assert all_dirs, "Received no dirs to move" + assert all( + d.is_dir() for d in all_dirs + ), f"move_segments was expecting dirs received files: {all_dirs[:10]}..." + + regroup_dir.parent.mkdir(exist_ok=True) + regroup_dir.mkdir(exist_ok=True) + ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2) + + def _move_segments(subdir: Path, regroup_dir: Path) -> str: + n = 0 + for f in subdir.iterdir(): + if not f.is_file() or f.is_symlink(): + continue + n += f.name.endswith(".json.gz") + new_name = regroup_dir / f.name + target = new_name.resolve() + assert f.resolve() != target + # this make the job idempotent. 
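+ # (a re-run sees a symlink instead of a regular file and skips it, thanks to
+ # the `f.is_symlink()` check above)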
+ f.rename(new_name) + f.symlink_to(target) + + if n == 0: + return "" + + return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}" + + ex(_move_segments, all_dirs, repeat(regroup_dir)) + print(f"Results are in {regroup_dir}") + return regroup_dir + + +def _validate_test(conf: Config, output_dir: Path, generate: bool = False): + stats: Dict[str, dict] = {} + for file in sorted(output_dir.glob("*.json.gz")): + fname = "/".join((file.parent.name, file.name)) + # The order of documents is not guaranteed inside a shard, + lines = sorted(jsonql.open_read(file)) + content = "\n".join(lines) + size = len(content) + checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest() + # first_document = json.loads(lines[0]) + stats[fname] = {"size": size, "checksum": checksum} + + def dump(x): + return json.dumps(x, indent=2, ensure_ascii=False) + + print("*** Stats ***") + stats_raw = dump(stats) + stats_file = FILE_DIR / "data" / "test_stats.json" + if generate: + print("Saving stats to", stats_file) + stats_file.write_text(stats_raw) + return + + expected_stats: Dict[str, dict] = {} + if stats_file.exists(): + expected_stats = json.loads(stats_file.read_text()) + + if expected_stats == stats: + print("Everything looks good !") + return + + stats_file.with_suffix(".actual.json").write_text(stats_raw) + print("*** Expected Stats ***") + print(dump(expected_stats)) + + print("*** Diff ***") + for fname in sorted(expected_stats.keys()): + print(fname) + assert fname in expected_stats, "missing file " + fname + if expected_stats[fname]["size"] != stats[fname]["size"]: + print( + " - Expected size", + expected_stats[fname]["size"], + ", size", + stats[fname]["size"], + ) + if expected_stats[fname]["checksum"] != stats[fname]["checksum"]: + print( + " - Expected checksum", + expected_stats[fname]["checksum"], + ", checksum", + stats[fname]["checksum"], + ) + + +def get_main_parser() -> ArgumentParser: + # Generates the 'main' parser by patching a 'Config' parser + p = func_argparse.func_argparser(Config) + + # Override defaults value to None, so we know what was set by the user. + # Note that it will keep the original default values in the help message. + p.set_defaults(**{f: None for f in Config._fields}) + p.add_argument("--config", type=str, default="base") + p.set_defaults(__command=main) + return p + + +def main(config: str = "base", **config_as_dict: Any) -> None: + # Use the given 'config' as default value. + config_base = config + if config_base in PREDEF_CONFIGS: + conf = PREDEF_CONFIGS[config_base] + elif Path(config_base).exists(): + conf = Config.from_json(Path(config_base)) + else: + raise ValueError( + f"Invalid value {config_base} for --config. " + f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file." + ) + conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None}) + + print(f"Will run cc_net.mine.main with the following config:", conf) + + all_files = mine(conf) + if conf.will_split: + assert all_files + assert all(d.is_dir() for d in all_files) + all_dirs = all_files + if "split_by_lang" in conf.pipeline: + # Only try regrouping if we split the shards. + regroup(conf, all_dirs) + elif "split_by_segment" in conf.pipeline: + # If we split by segment then regrouping is trivial, since segments appear in only one shard. 
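+ # Each mined file just needs to be moved (and symlinked back) into the
+ # final directory.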
+ move_segments(conf, all_dirs) + + if conf.config_name == "test": + _validate_test(conf, conf.get_mined_dir(regroup=True)) + + +if __name__ == "__main__": + func_argparse.parse_and_call(get_main_parser()) diff --git a/cc-multilingual-main/cc_net/cc_net/minify.py b/cc-multilingual-main/cc_net/cc_net/minify.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5234ea77aecce6fbcff767f09845072657cb57 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/minify.py @@ -0,0 +1,304 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import base64 +import hashlib +import itertools +import urllib.parse +from pathlib import Path +from typing import Dict, Iterable, List, Optional, Sequence, Set, Union + +import numpy as np + +from cc_net import jsonql +from cc_net.execution import get_executor +from cc_net.jsonql import mem_footprint_gb + +HASH_SIZE = 4 +HASH_TYPE = np.uint32 + +PUBLIC_FIELDS = ["url", "digest"] +COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"] +DATA = Path(__file__).parent.parent / "data" + + +# This is similar to dedup methods but with use 32 bits hashes. +def _b2i(b: bytes) -> int: + return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0) + + +def _str_hash(s: str) -> int: + h = hashlib.sha1(bytes(s, encoding="utf-8")) + return _b2i(h.digest()) + + +def get_hashes(lines: Iterable[str]) -> List[bytes]: + h = HASH_SIZE + return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines] + + +def encode_hashes(hashes: Iterable[bytes]) -> str: + return base64.b64encode(b"".join(hashes)).decode("ascii") + + +def encode_as_hashes(lines: Iterable[str]) -> str: + return encode_hashes(get_hashes(lines)) + + +def decode_hashes(compact: str) -> List[bytes]: + all_hashes = base64.b64decode(compact) + res = [] + assert len(all_hashes) % HASH_SIZE == 0 + for i in range(len(all_hashes) // HASH_SIZE): + chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE] + res.append(chunk) + + return res + + +def encode_line_ids(line_ids: Sequence[int]) -> str: + arr = np.array(line_ids, dtype=" List[int]: + ids_bytes = bytearray(base64.b64decode(compact)) + return np.ndarray(len(ids_bytes) // 2, dtype=" int: + assert digest.startswith("sha1:") + h = base64.b32decode(digest[5:]) + return _b2i(h[:HASH_SIZE]) + + +class Minifier(jsonql.Transformer): + ready = True + + def __init__(self): + self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS) + + def do(self, doc: dict) -> Optional[dict]: + line_ids: List[int] = doc.pop("line_ids") + fields = self.fields + keys = list(doc.keys()) + for k in keys: + if k not in fields: + doc.pop(k, None) + p = doc.get("perplexity", 0) + doc["line_ids"] = encode_line_ids(line_ids) + if p: + doc["perplexity"] = round(p, 1) + s = doc.get("language_score", 0) + if s: + doc["language_score"] = round(s, 2) + return doc + + +class MetadataFetcher(jsonql.Transformer): + """Reads documents from CC snapshot and join precomputed metadata. + + CC snapshots are split in segments. Each segment is 64Mb long. + The metadata must also be stored in segments of the same size and names. 
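+
+ For a segment named "<name>.warc.wet.gz" the fetcher looks for a metadata file
+ "<name>.json.gz" in `folder` (or under the same URL prefix) and joins its
+ entries to documents via their "digest" field.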
+ """ + + def __init__(self, folder: Union[Path, str]): + self.ready = True + self.metadata: Dict[int, dict] = {} + + self._segments: Set[str] = set() + self.read_doc = 0 + self.missed_doc = 0 + self.missed_par = 0 + self.processed_par = 0 + + if isinstance(folder, str): + # detect path passed as string + if urllib.parse.urlparse(folder).scheme == "": + folder = Path(folder) + assert folder.exists(), f"Metadata folder not found: {folder}" + + self.folder = folder + self.segment: str = "" + self.segments_read_twice = 0 + + def meta_file(self, segment: str) -> str: + file_name = segment.split("/")[-1] + assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet") + if isinstance(self.folder, str): + return urllib.parse.urljoin( + self.folder, file_name.replace(".warc.wet", ".json") + ) + meta_file = self.folder / file_name.replace(".warc.wet", ".json") + assert ( + meta_file.exists() + ), f"Couldn't find metadata file for segment {segment} at {meta_file}" + return str(meta_file) + + def fetch_metadata(self, segment: str) -> None: + meta_file = self.meta_file(segment) + k = get_doc_key + self.metadata = {} + collision = 0 + for m in jsonql.read_jsons(meta_file): + key = k(m["digest"]) + if key in self.metadata: + collision += 1 + self.metadata[key] = m + + self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}") + if collision > 0: + self._logger.warning(f"Found {collision} collisions !") + + self.segment = segment + if segment in self._segments: + self.log("Cache miss") + self.segments_read_twice += 1 + self._segments.add(segment) + + def do(self, doc: dict) -> Optional[dict]: + if self.segment != doc["cc_segment"]: + self.fetch_metadata(doc["cc_segment"]) + digest = doc["digest"] + key = get_doc_key(digest) + if key not in self.metadata: + return None + + metadata = self.metadata.pop(key) + return self.clean(metadata, doc) + + def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]: + line_ids = decode_line_ids(metadata.pop("line_ids")) + lines = full_doc["raw_content"].split("\n") + cleaned = [] + for l in line_ids: + if l >= len(lines) or l < 0: + self.missed_par += 1 + continue + cleaned.append(lines[l]) + + self.processed_par += len(line_ids) + if not cleaned: + self.missed_doc += 1 + return None + + full_doc["raw_content"] = "\n".join(cleaned) + full_doc["original_nlines"] = full_doc["nlines"] + full_doc["original_length"] = full_doc["length"] + full_doc["nlines"] = len(cleaned) + full_doc["length"] = len(full_doc["raw_content"]) + for key, value in metadata.items(): + full_doc[key] = value + return full_doc + + def summary(self) -> List[str]: + summ = super().summary() + mem = mem_footprint_gb() + len_cache = len(self.metadata) + summ.append( + f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g." + ) + if self.missed_doc: + r = self.missed_doc / self.processed + summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !") + + if self.missed_par: + r = self.missed_par / self.processed + summ.append(f"! 
Missed {self.missed_par} paragraphs ({r:.1%}) !") + return summ + + +def _expand_files(files: List[Path]) -> List[Path]: + if len(files) == 1 and files[0].is_dir(): + folder = files[0] + files = sorted(folder.glob("*.json.gz")) + print(f"Found {len(files)} files under {folder}/*.json.gz") + assert files, "No files found" + return files + + +def minify_file(file: Path, output: Path) -> str: + """Minify the given file.""" + jsonql.run_pipes(Minifier(), file=file, output=output) + return f"Minified {output}" + + +def minify( + files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1 +): + """Minify all the files in the given folder.""" + files = _expand_files(files) + output_dir.mkdir(exist_ok=True) + with open(output_dir / "files.txt", "w") as o: + for f in files: + print(f.name, file=o) + outputs = [output_dir / f.name for f in files] + ex = get_executor( + "minify", + output_dir / "logs", + execution, + timeout_hour=2, + cpus=1, + task_parallelism=parallelism, + ) + ex(minify_file, files, outputs) + + +def fetch_metadata_file( + file: Union[Path, str], + metadata_dir: Union[Path, str], + output: Path, + cache_dir: Path = None, +): + unminifier = MetadataFetcher(metadata_dir) + tmp = output.with_name("tmp." + output.name) + jsonql.run_pipes(unminifier, file=file, output=tmp) + tmp.rename(output) + return f"Fetched metadata for {file}. Results at {output}." + + +def fetch_metadata( + files: List[str], + metadata_dir: Union[Path, str], + output_dir: Path, + execution: str = "mp", + parallelism: int = -1, + cache_dir: Path = None, +): + if len(files) == 1 and Path(files[0]).is_dir(): + folder = Path(files[0]) + files = [str(f) for f in sorted(folder.glob("*.json.gz"))] + print(f"Found {len(files)} files under {folder}/*.json.gz") + + assert len(files) > 0, "No files given." + output_dir.mkdir(exist_ok=True) + + outputs = [output_dir / str(f).split("/")[-1] for f in files] + if cache_dir is None: + cache_dir = output_dir / "wet_cache" + cache_dir.mkdir(exist_ok=True) + if str(cache_dir) == "none": + cache_dir = None + files = [f for f, o in zip(files, outputs) if not o.exists()] + outputs = [o for o in outputs if not o.exists()] + if not files: + return + ex = get_executor( + "unminify", + output_dir / "logs", + execution, + timeout_hour=8, + cpus=1, + task_parallelism=parallelism, + mem_gb=32, + ) + ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir)) + + +if __name__ == "__main__": + import func_argparse + + func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file) diff --git a/cc-multilingual-main/cc_net/cc_net/perplexity.py b/cc-multilingual-main/cc_net/cc_net/perplexity.py new file mode 100644 index 0000000000000000000000000000000000000000..c93d5acde72486eae505feb2fde845ad74bec7dc --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/perplexity.py @@ -0,0 +1,356 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# + +import argparse +import time +from pathlib import Path +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import kenlm # type: ignore +import numpy as np # type: ignore +import pandas as pd # type: ignore +import sentencepiece # type: ignore + +from cc_net import jsonql, text_normalizer + +LMDescriptor = Union[Dict[str, Path], Union[Path, str]] + + +def get_args(): + parser = argparse.ArgumentParser( + description="Compute the score of each sentences of a document", + parents=[jsonql.io_parser()], + ) + parser.add_argument("--models", type=str) + + parser.add_argument("--sentences", action="store_true", default=False) + parser.add_argument( + "--languages", type=str, help="Ignore doc with another language" + ) + parser.add_argument("--field", type=str, default=None) + parser.add_argument("--newline", type=str, default="\n") + return vars(parser.parse_args()) + + +def pp(log_score, length): + return 10.0 ** (-log_score / length) + + +class SentencePiece(jsonql.Transformer): + # Sentence Pieces model have to be read back from disk. + warning_when_pickling = True + + def __init__( + self, + model: Path, + field: str, + output_field: str = "tokenized", + normalize: bool = False, + ): + super().__init__() + self.model = model + self.field = field + self.output_field = output_field + self.normalize = normalize + self.sp: sentencepiece.SentencePieceProcessor = None + + def _prepare(self): + if self.sp is not None: + return + self.sp = sentencepiece.SentencePieceProcessor() + self.sp.load(str(self.model)) + return self + + def do(self, document: dict) -> dict: + text = document[self.field] + if self.normalize: + text = text_normalizer.normalize(text) + tokenized = self.sp.encode_as_pieces(text) + document[self.output_field] = " ".join(tokenized) + return document + + +class MultiSentencePiece(jsonql.Transformer): + warning_when_pickling = True + + def __init__( + self, + models: Union[Path, Dict[str, Path]], + field: str, + output_field: str = "tokenized", + normalize: bool = False, + ): + super().__init__() + self.field = field + self.output_field = output_field + self.normalize = normalize + self._prefetch: Sequence[str] = [] + + if isinstance(models, Path): + self.models = { + m.name.split(".")[0]: m for m in models.parent.glob(models.name) + } + else: + self.models = models + self._prefetch = list(models.keys()) + self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {} + + def _prepare(self) -> None: + for lang in self._prefetch: + assert ( + self.get_sp(lang) is not None + ), f"No model found for {lang} at {self.models.get(lang)}." 
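+ # Languages listed in a dict `models` are loaded eagerly here; when a glob
+ # Path is given instead, each SentencePiece model is loaded lazily by
+ # `get_sp` on first use.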
+ + def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]: + sp = self.sp.get(lang) + if sp is not None: + return sp + if lang not in self.models: + return None + + start_load = time.time() + self.log(f"Loading {self.models[lang]}...") + sp = sentencepiece.SentencePieceProcessor() + sp.load(str(self.models[lang])) + self.sp[lang] = sp + load_time = time.time() - start_load + self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)") + return sp + + def do(self, document: dict) -> Optional[dict]: + text = document[self.field] + if self.normalize: + text = text_normalizer.normalize(text) + sp = self.get_sp(document.get("language")) + if sp is None: + return document + tokenized = sp.encode_as_pieces(text) + document[self.output_field] = " ".join(tokenized) + return document + + +class DocLM(jsonql.Transformer): + def __init__( + self, + models: Union[Path, Dict[str, Path]], + field: str, + output_field: str = "perplexity", + newline: str = "\n", + normalize: bool = True, + load_method: int = 2, + ): + super().__init__() + self.field = field + self.output_field = output_field + self.newline = newline + self.normalize = normalize + self._prefetch: Sequence[str] = [] + self.lm_config = kenlm.Config() + # This is the default settings + # POPULATE will mmap the models and populate the pages. + # Maybe that's not the best way when the models are on a network disk. + # TODO: try copying models file, try READ or PARALLEL_READ + self.lm_config.load_method = load_method + + if isinstance(models, Path): + self.models = { + m.name.split(".")[0]: m for m in models.parent.glob(models.name) + } + else: + self.models = models + self._prefetch = list(models.keys()) + self.lm: Dict[str, kenlm.Model] = {} + self.n_lines = 0 + + def _prepare(self) -> None: + for lang in self._prefetch: + assert ( + self.get_lm(lang) is not None + ), f"No model found for {lang} at {self.models.get(lang)}." 
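+ # As with the SentencePiece models, kenlm LMs not prefetched here are loaded
+ # on demand by `get_lm`; documents whose language has no LM pass through
+ # `do` unscored.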
+ + def get_lines(self, document: dict) -> List[str]: + lang = document.get("language") + if not lang: + return [] + if lang not in self.models: + return [] + + content = document.get(self.field) + if not content: + return [] + + lines = content.split(self.newline) + self.n_lines += len(lines) + return lines + + def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]: + if lang is None: + return None + lm = self.lm.get(lang) + if lm is not None: + return lm + model = self.models.get(lang) + if model is None: + return None + start_load = time.time() + self.log(f"Loading {self.models[lang]}...") + lm = kenlm.Model(str(model), self.lm_config) + self.lm[lang] = lm + load_time = time.time() - start_load + self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)") + + return lm + + def do(self, document: dict) -> dict: + lines = self.get_lines(document) + model = self.get_lm(document.get("language")) + if not lines or not model: + return document + + doc_log_score, doc_length = 0, 0 + for line in lines: + if self.normalize: + line = text_normalizer.normalize(line) + log_score = model.score(line) + length = len(line.split()) + 1 + doc_log_score += log_score + doc_length += length + + document[self.output_field] = round(pp(doc_log_score, doc_length), 1) + return document + + def summary(self): + delay = time.time() - self.start_time + h = delay / 3600 + s = self.n_lines / delay + + summ = super().summary() + summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).") + return summ + + +class SentencesLM(DocLM): + """Returns the score of each individual paragraph.""" + + def do(self, document: dict) -> Optional[str]: # type: ignore + lines = self.get_lines(document) + model = self.get_lm(document.get("language")) + if not lines or not model: + return None + + sentences = [] + for line in lines: + if self.normalize: + line = text_normalizer.normalize(line) + log_score = model.score(line) + length = len(line.split()) + 1 + + sentences.append(f"{pp(log_score, length)}\t{line}") + + return "\n".join(sentences) + + +class PerplexityBucket(jsonql.Transformer): + def __init__( + self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60 + ): + super().__init__() + self.cutoff_csv = cutoff_csv + self.percentile_head = percentile_head + self.percentile_tail = percentile_tail + self.cutoffs: Dict[str, Tuple[float, float]] = {} + + def _prepare(self) -> None: + cutoffs = pd.read_csv(self.cutoff_csv, index_col=0) + self.cutoffs = { + l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail]) + for l in cutoffs.columns + } + + def get_bucket(self, doc: dict) -> str: + perplexity = doc.get("perplexity", -1) + lang = doc.get("language") + if lang not in self.cutoffs or perplexity < 0: + return "all" + + pp_head, pp_tail = self.cutoffs[lang] + if perplexity < pp_head: + return "head" + if perplexity < pp_tail: + return "middle" + return "tail" + + def do(self, doc: dict) -> dict: + doc["bucket"] = self.get_bucket(doc) + return doc + + +class DropKeys(jsonql.Transformer): + def __init__(self, *keys): + super().__init__() + self.keys = keys + + def do(self, document: dict) -> Optional[dict]: + if not document: + return None + + for key in self.keys: + document.pop(key, None) + return document + + +class RemoveSmall(jsonql.Transformer): + def __init__(self, field, min_len): + super().__init__() + self.field = field + self.min_len = min_len + self.removed = 0 + + def do(self, document: dict) -> Optional[dict]: + if not document: + return None + + 
content = document.get(self.field) + if not content or len(content) < self.min_len: + self.removed += 1 + return None + return document + + def summary(self): + r, n = self.removed, self.processed + ratio = r / n if n else 0 + return [f"Removed {r} small documents out of {n} ({ratio:.1%})"] + + +def perplexity_to_bin(file: Path, output: Path, models, tok_field: str): + pp_field = "perplexity" + lm = DocLM(models, tok_field, output_field=pp_field) + stats: List[float] = [] + max_stats = 1_000_000 + batch_size = 100_000 + i = 0 + batch = [] + with open(output, "wb") as o: + for doc in jsonql.read_jsons(file): + i += 1 + pp = lm(doc)[pp_field] + if len(stats) < max_stats: + stats.append(pp) + batch.append(pp) + if len(batch) >= batch_size: + np.array(batch, dtype=np.float32).tofile(o) + batch = [] + if len(batch) > 0: + np.array(batch, dtype=np.float32).tofile(o) + + +if __name__ == "__main__": + args = get_args() + output = Path(args["output"]) + if output.suffix == ".bin": + perplexity_to_bin(args["file"], output, args["models"], args["field"]) + else: + jsonql.run_pipe(DocLM, args) diff --git a/cc-multilingual-main/cc_net/cc_net/process_wet_file.py b/cc-multilingual-main/cc_net/cc_net/process_wet_file.py new file mode 100644 index 0000000000000000000000000000000000000000..d27e4ba1944c89b02d2f35def9b65012b727fc9b --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/process_wet_file.py @@ -0,0 +1,292 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import contextlib +import functools +import logging +import re +import tempfile +import time +import urllib.request +from pathlib import Path +from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence +from urllib.parse import urlparse + +import func_argparse +from bs4 import BeautifulSoup # type: ignore + +from cc_net import jsonql + +WET_URL_ROOT = "https://data.commoncrawl.org" + + +logger = logging.getLogger(__name__) + + +def cc_wet_paths_url(dump_id: str) -> str: + return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"]) + + +@functools.lru_cache() +def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]: + wet_paths = cc_wet_paths_url(dump_id) + cache_dir = cache_dir or jsonql._tmp_dir() + wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz" + f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache) + return [segment.strip() for segment in f] + + +def list_dumps() -> List[str]: + home_page = BeautifulSoup( + urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser" + ) + dumps = [a.get("href").strip("/") for a in home_page.findAll("a")] + dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)] + + return sorted(dumps) + + +def ls(): + for dump in list_dumps(): + print(dump, "->", cc_wet_paths_url(dump)) + + +def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]: + """Headers format is: + WARC/1.0 + WARC-Type: conversion + WARC-Target-URI: [url] + WARC-Date: [crawldate: 2019-02-15T19:15:59Z] + WARC-Record-ID: + WARC-Refers-To: + WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2 + Content-Type: text/plain + Content-Length: 7743 + """ + if not headers or not doc: + return None + + try: + warc_type = headers[1].split()[1] + if warc_type != "conversion": + return None + url = headers[2].split()[1] + date = headers[3].split()[1] + digest = headers[6].split()[1] + length = 0 + for 
h in headers: + if(h != "" and h.split()[0] == "Content-Length:"): + length = int(h.split()[1]) + break + + except Exception as e: + logger.warning("Can't parse header:", e, headers, doc) + return None + + # Docs are separated by two empty lines. + last = None + if not doc[-1] and not doc[-2]: + last = -2 + title, doc = doc[0], doc[1:last] + + return { + "url": url, + "date_download": date, + "digest": digest, + "length": length, + "nlines": len(doc), + "source_domain": urlparse(url).netloc, + "title": title, + "raw_content": "\n".join(doc), + } + + +def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]: + doc: List[str] = [] + headers, read_headers = [], True + for warc in warc_lines: + warc = warc.strip() + if read_headers: + headers.append(warc) + read_headers = warc != "" + continue + + if warc == "WARC/1.0": + # We reached the beginning of the new doc. + parsed = parse_doc(headers, doc) + if parsed is not None: + yield parsed + headers, doc, read_headers = [warc], [], True + continue + + doc.append(warc) + + # Return the last document + if doc: + parsed = parse_doc(headers, doc) + if parsed is not None: + yield parsed + + +def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]: + n_doc = 0 + n_ok = 0 + for doc in group_by_docs(lines): + n_doc += 1 + if not doc or len(doc["raw_content"]) < min_len: + continue + n_ok += 1 + yield doc + if n_doc > 0: + logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).") + else: + logger.info(f"Found no documents") + + +def dl( + dump: str, + shard: int, + num_shards: int, + output: Path = None, + num_segments_per_shard: int = 0, +): + """Download a shard of the common crawl, and export it to json. + + Arguments: + output: filename of the output file + dump: CC dump id + shard: id of the shard + num_shards: total number of shards + num_segments_per_shard: manual control of the number of segment per shard. + """ + reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard) + jsonql.run_pipes(inputs=reader, output=output) + logger.info(f"Done. {output} is ready.") + + +class CCSegmentsReader(Iterable[dict]): + def __init__( + self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None + ): + self._segments = segments + self.min_len = min_len + if cache_dir is not None: + cache_dir = Path(cache_dir) + cache_dir.mkdir(exist_ok=True) + self.cache_dir = cache_dir + self.retrieved_segments = 0 + + def segment_url(self, segment: str): + return "/".join((WET_URL_ROOT, segment)) + + @property + def segments(self) -> Sequence[str]: + return self._segments + + def open_segment(self, segment: str) -> Iterable[str]: + url = self.segment_url(segment) + file: Optional[Path] = None + if self.cache_dir: + file = self.cache_dir / segment.split("/")[-1] + if not file or not file.exists(): + self.retrieved_segments += 1 + + return jsonql.open_remote_file(url, cache=file) + + def __iter__(self) -> Iterator[dict]: + n = len(self.segments) + for i, segment in enumerate(self.segments): + start = time.time() + # TODO: start downloading the next segment in the background + for doc in parse_warc_file(self.open_segment(segment), self.min_len): + doc["cc_segment"] = segment + yield doc + + if i + 1 >= n: + continue + end = time.time() + delay = (end - start) / 3600 * (n - 1 - i) + logger.info( + f"Parsed {i + 1} / {n} files. 
Estimated remaining time: {delay:.1f}h" + ) + + +class CCShardReader(CCSegmentsReader): + def __init__( + self, + dump: str, + shard: int, + num_shards: int = -1, + num_segments_per_shard: int = 40, + min_len: int = 300, + cache_dir: Path = None, + ): + """Downloads a shard of Common Crawl, and yields dict. + + Arguments: + dump: CC dump id + shard: id of the shard + num_shards: total number of shards + num_segments_per_shard: if set will limit the number of files by shard. + Useful for testing. + """ + super().__init__([], min_len=min_len, cache_dir=cache_dir) + self.dump = dump + self.shard = shard + assert num_shards > 0 or num_segments_per_shard > 0 + self.num_shards = num_shards + self.num_segments_per_shard = num_segments_per_shard + + @property + def segments(self) -> Sequence[str]: + # Delaying the initialization allows to delay the looking up of the WET files + if self._segments: + return self._segments + segments = cc_segments(self.dump, self.cache_dir) + n = len(segments) + if self.num_shards < 0: + self.num_shards = n // self.num_segments_per_shard + i_min = (self.shard * n) // self.num_shards + i_max = ((self.shard + 1) * n) // self.num_shards + if self.num_segments_per_shard > 0: + i_max = min(i_max, i_min + self.num_segments_per_shard) + self._segments = segments[i_min:i_max] + return self._segments + + +def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path: + _, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir) + return Path(tmp_path) + + +@contextlib.contextmanager +def timer(name: str = "-"): + start = time.time() + yield None + delay = time.time() - start + print(f"{name} took {delay:.1f}s") + + +def benchmark(tmp_path: Path): + segments = [ + "crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz" + ] + seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz" + + with timer("from network"): + list(CCSegmentsReader(segments)) + + with timer("from network, with caching"): + list(CCSegmentsReader(segments, cache_dir=tmp_path)) + assert seg_file.exists() + + with timer("from disk"): + CCSegmentsReader(segments, cache_dir=tmp_path) + seg_file.unlink() + + +if __name__ == "__main__": + func_argparse.main(ls, dl) diff --git a/cc-multilingual-main/cc_net/cc_net/regroup.py b/cc-multilingual-main/cc_net/cc_net/regroup.py new file mode 100644 index 0000000000000000000000000000000000000000..575baeefe78ea8a22f72b49452f1f8e4f157a161 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/regroup.py @@ -0,0 +1,122 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import logging +import subprocess +from pathlib import Path +from typing import List + +import func_argparse +import numpy as np + +from cc_net import jsonql + + +def get_index(file: Path) -> Path: + return file.parent / (file.name + ".index") + + +def _get_tmp(output: Path) -> Path: + return output.parent / (output.stem + ".tmp" + output.suffix) + + +def reshard( + inputs: List[Path], + output: Path, + tmp: Path = None, + free_original: bool = False, + rm_original: bool = False, +) -> Path: + """Read the given files and concatenate them to the output file. + + Can remove original files on completion, or just write dummy content into them to free disk. 
+ """ + if tmp is None: + tmp = _get_tmp(output) + logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}") + jsonql.run_pipes(file=inputs, output=tmp) + tmp.replace(output) + tmp_index = get_index(tmp) + if tmp_index.exists(): + tmp_index.replace(get_index(output)) + + if not (free_original or rm_original): + return output + + for _input in inputs: + if rm_original: + _input.unlink() + elif free_original: + # Overwrite the previous file. + # This frees up disk space and allows doit to properly track the success. + _input.write_text(f"Resharded into {output}") + if get_index(_input).is_file(): + get_index(_input).unlink() + + return output + + +def fast_reshard( + inputs: List[Path], + output: Path, + tmp: Path = None, + free_original: bool = False, + rm_original: bool = False, +) -> Path: + """Same as reshard but don't re-compress the output. + + This will lead to a bigger output file, especially if the shards are very small. + """ + if tmp is None: + tmp = _get_tmp(output) + with open(tmp, "wb") as o: + subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o) + + tmp.replace(output) + indexes_files = [get_index(i) for i in inputs] + existing_indexes = sum(i.exists() for i in indexes_files) + assert ( + existing_indexes == len(indexes_files) or existing_indexes == 0 + ), "some indexes don't exist." + if existing_indexes > 0: + indexes = [np.load(idx) for idx in indexes_files] + for i in range(len(indexes) - 1): + indexes[i + 1] += indexes[i][-1] + with open(str(output) + ".index", "wb") as o: + np.save(o, np.concatenate(indexes)) + + if not (free_original or rm_original): + return output + + for _input in inputs: + if rm_original: + _input.unlink() + elif free_original: + # Overwrite the previous file. + # This frees up disk space and allows doit to properly track the success. + _input.write_text(f"Resharded into {output}") + if get_index(_input).is_file(): + get_index(_input).unlink() + + return output + + +def determine_groups( + inputs: List[Path], target_size: int = 4 * 1024 ** 3 +) -> List[List[Path]]: + if len(inputs) == 0: + return [] + + sample = inputs[:10] + typical_size = sum(s.stat().st_size for s in sample) / len(sample) + group_size = min(target_size // typical_size, len(inputs)) + group_size = max(group_size, 1) + + return jsonql.grouper(inputs, group_size) + + +if __name__ == "__main__": + func_argparse.single_main(reshard) diff --git a/cc-multilingual-main/cc_net/cc_net/split_by_lang.py b/cc-multilingual-main/cc_net/cc_net/split_by_lang.py new file mode 100644 index 0000000000000000000000000000000000000000..e8c5c6224547d6372e936933d2387c5d3e86543c --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/split_by_lang.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import argparse +import collections +from pathlib import Path +from typing import Dict, Optional + +import fasttext # type: ignore + +from cc_net import jsonql + + +def get_args(): + parser = argparse.ArgumentParser( + description="Read a list of json files and split them ", + parents=[jsonql.io_parser()], + ) + parser.add_argument("--pattern", type=str) + parser.add_argument("--field", type=str, default="raw_content") + parser.add_argument("--threshold", type=float, default=0) + parser.add_argument("--model", type=str, required=True) + parser.add_argument("--out_field", type=str, default="language") + parser.add_argument("--top", type=int, default=1) + return vars(parser.parse_args()) + + +def predict(model, text: str, k: int = 1): + labels, scores = model.predict(text, k=k) + labels = [l.replace("__label__", "") for l in labels] + return labels, scores + + +def avg_predict(model, text): + # Overall gives the same results than predict(model, text.replace("\n", "")) + text = text.split("\n") + text_len = sum(len(line) for line in text) + if text_len == 0: + return None, 0 + scores = [predict(model, line) for line in text] + scores_by_label: Dict[str, float] = collections.defaultdict(float) + for (label, score), line in zip(scores, text): + scores_by_label[label] += score * len(line) + + label, score = max(scores_by_label.items(), key=lambda kv: kv[1]) + return label, score / text_len + + +class Classifier(jsonql.Transformer): + def __init__( + self, + model: Path, + field: str, + out_field: str, + threshold: float = 0, + top: int = 1, + language: str = None, + rounding: int = 2, + ): + super().__init__() + self.model = model + assert model.exists(), f"Model {model} doesn't exist." + self.field = field + self.out_field = out_field + self.threshold = threshold + self.top = top + self.language = language + self.rounding = rounding + # Fasttext model is a C object and can't be pickled + self.fasttext_model: fasttext._FastText = None + self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0 + self.cnt: Dict[str, int] = {} + + def _prepare(self): + self.log(f"Loading {self.model}") + self.fasttext_model = fasttext.load_model(str(self.model)) + + def predict(self, text): + return predict(self.fasttext_model, text.replace("\n", ""), k=self.top) + + def do(self, doc: dict) -> Optional[dict]: + text = doc.get(self.field, None) + if not text: + return None + + if self.language and doc.get("language") != self.language: + self.n_ignored += 1 + return doc + + self.n_doc += 1 + labels, scores = self.predict(text) + scores.round(self.rounding, out=scores) + for l in labels: + self.cnt[l] = self.cnt.get(l, 0) + 1 + + if self.top == 1: + existing_label = doc.get(self.out_field, None) + if existing_label and labels[0] != existing_label: + self.n_disagreement += 1 + + if all(s < self.threshold for s in scores): + return None + + self.n_accepted += 1 + if self.top == 1: + doc[self.out_field] = labels[0] + doc[self.out_field + "_score"] = scores[0] + else: + doc[self.out_field] = {l: s for l, s in zip(labels, scores)} + return doc + + def summary(self): + n_doc, n_accepted, n_disagreement, cnt, out_field = ( + self.n_doc, + self.n_accepted, + self.n_disagreement, + self.cnt, + self.out_field, + ) + summ = super().summary() + if self.threshold > 0: + ratio = n_accepted / n_doc if n_doc else 0 + summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})") + summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}") + + disagreement = n_disagreement / n_doc if n_doc else 0 + if 
disagreement: + summ.append(f"{out_field} disagreement is at {disagreement:.1%}.") + return summ + + def __repr__(self): + return f"Classifier({self.model})" + + +def classify_and_split(file, output, pattern, **kwargs): + classifier = Classifier(**kwargs) + splitter = jsonql.split(pattern) + jsonql.run_pipes(classifier, splitter, file=file, output=output) + + +if __name__ == "__main__": + args = get_args() + pattern = args.get("pattern") + if pattern: + classify_and_split(**args) + else: + args.pop("pattern") + jsonql.run_pipe(Classifier, args) diff --git a/cc-multilingual-main/cc_net/cc_net/text_normalizer.py b/cc-multilingual-main/cc_net/cc_net/text_normalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..cc2bebc4de99244b9498885ab933b240bebdfa0c --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/text_normalizer.py @@ -0,0 +1,189 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import re +import unicodedata + +UNICODE_PUNCT = { + ",": ",", + "。": ".", + "、": ",", + "„": '"', + "”": '"', + "“": '"', + "«": '"', + "»": '"', + "1": '"', + "」": '"', + "「": '"', + "《": '"', + "》": '"', + "´": "'", + "∶": ":", + ":": ":", + "?": "?", + "!": "!", + "(": "(", + ")": ")", + ";": ";", + "–": "-", + "—": " - ", + ".": ". ", + "~": "~", + "’": "'", + "…": "...", + "━": "-", + "〈": "<", + "〉": ">", + "【": "[", + "】": "]", + "%": "%", + "►": "-", +} + +UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]") + + +def replace_unicode_punct(text: str) -> str: + return "".join((UNICODE_PUNCT.get(c, c) for c in text)) + + +def remove_unicode_punct(text: str) -> str: + """More aggressive version of replace_unicode_punct but also faster.""" + return UNICODE_PUNCT_RE.sub("", text) + + +def strip_accents(line: str) -> str: + """Strips accents from a piece of text.""" + nfd = unicodedata.normalize("NFD", line) + output = [c for c in nfd if unicodedata.category(c) != "Mn"] + if len(output) == line: + return line + return "".join(output) + + +# Build a regex matching all control characters. 
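+# (i.e. the ASCII C0 controls 0x00-0x1F plus DEL and the C1 controls 0x7F-0x9F)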
+NON_PRINTING_CHARS_RE = re.compile( + f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]" +) +DIGIT_RE = re.compile(r"\d") +PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile( + (UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "") +) + + +def remove_non_printing_char(text: str) -> str: + return NON_PRINTING_CHARS_RE.sub("", text) + + +def normalize_spacing_for_tok(text: str, language: str = "en") -> str: + res = ( + text.replace("\r", "") + # remove extra spaces + .replace("(", " (") + .replace(")", ") ") + .replace(" +", " ") + ) + res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res) + res = res.replace("( ", "(").replace(" )", ")") + res = re.sub(r"(\d) \%", r"\1\%", res) + res = res.replace(" :", ":").replace(" ;", ";") + res = res.replace("`", "'").replace("''", ' " ') + + res = ( + res.replace("„", '"') + .replace("“", '"') + .replace("”", '"') + .replace("–", "-") + .replace("—", " - ") + .replace(" +", " ") + .replace("´", "'") + .replace("([a-z])‘([a-z])", r"\1'\2/") + .replace("([a-z])’([a-z])", r"\1'\2/") + .replace("‘", '"') + .replace("‚", '"') + .replace("’", '"') + .replace("''", '"') + .replace("´´", '"') + .replace("…", "...") + # French quotes + .replace(" « ", ' "') + .replace("« ", '"') + .replace("«", '"') + .replace(" » ", '" ') + .replace(" »", '"') + .replace("»", '"') + # handle pseudo-spaces + .replace(" %", "%") + .replace("nº ", "nº ") + .replace(" :", ":") + .replace(" ºC", " ºC") + .replace(" cm", " cm") + .replace(" ?", "?") + .replace(" !", "!") + .replace(" ;", ";") + .replace(", ", ", ") + .replace(" +", " ") + .replace(".", ". ") + ) + # English "quotation," followed by comma, style + if language == "en": + res = re.sub(r"\"([,\.]+)", r"\1\"", res) + # Czech is confused + elif language == "cs" or language == "cz": + pass + # German/Spanish/French "quotation", followed by comma, style + else: + res = res.replace(',"', '",') + res = re.sub( + r"(\.+)\"(\s*[^<])", r"\"\1\2", res + ) # don't fix period at end of sentence + + if ( + language == "de" + or language == "es" + or language == "cz" + or language == "cs" + or language == "fr" + ): + res = re.sub(r"(\d) (\d)", r"\1,\2", res) + else: + res = re.sub(r"(\d) (\d)", r"\1.\2", res) + return res + + +def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str: + line = line.strip() + if not line: + return line + if case: + line = line.lower() + if accent: + line = strip_accents(line) + if numbers: + line = DIGIT_RE.sub("0", line) + if punct == 1: + line = replace_unicode_punct(line) + elif punct == 2: + line = remove_unicode_punct(line) + line = remove_non_printing_char(line) + return line + + +def slow_normalize_for_dedup(line: str) -> str: + return normalize(line, accent=False, case=True, numbers=True, punct=2) + + +def normalize_for_dedup(line: str) -> str: + line = line.strip() + if not line: + return line + # case + line = line.lower() + # numbers + line = DIGIT_RE.sub("0", line) + line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line) + return line diff --git a/cc-multilingual-main/cc_net/cc_net/tokenizer.py b/cc-multilingual-main/cc_net/cc_net/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c48c8a45a8bc31ea98b3b0eb49ac12298185c634 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/tokenizer.py @@ -0,0 +1,79 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# + +import time +from typing import Dict, Optional + +import sacremoses # type: ignore + +from cc_net import jsonql, text_normalizer + + +class RobustTokenizer(jsonql.Transformer): + """Moses tokenizer with the expected preprocessing.""" + + LANG_WITHOUT_ACCENT = {"en", "my"} + + def __init__(self, lang: str): + super().__init__() + self.lang = lang + self.moses = sacremoses.MosesTokenizer(lang) + self.rm_accent = lang in self.LANG_WITHOUT_ACCENT + self.ready = True + + def do(self, text: str): + text = text_normalizer.normalize( + text, accent=self.rm_accent, case=False, numbers=False, punct=True + ) + text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang) + return self.moses.tokenize(text, return_str=True, escape=False) + + +class DocTokenizer(jsonql.Transformer): + """Tokenize the text found in `output_field and store the result in `output_field`.""" + + def __init__( + self, + field: str, + output_field: str = "tokenized", + language_field: str = "language", + ): + super().__init__() + self.field = field + self.output_field = output_field + self.language_field = language_field + self.n_docs = 0 + self.tokenizers: Dict[str, RobustTokenizer] = {} + + def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]: + cache = self.tokenizers + if lang in cache: + return cache[lang] + if lang in ("th", "zh", "ja"): + # TODO find a tokenizer for those languages + return None + + cache[lang] = RobustTokenizer(lang) + return cache[lang] + + def do(self, document): + lang = document[self.language_field] + tok = self.get_tokenizer(lang) + if not tok: + return document + + self.n_docs += 1 + lines = document[self.field].split("\n") + tokenized = "\n".join(tok(l) for l in lines) + document[self.output_field] = tokenized + return document + + def summary(self): + delay = (time.time() - self.start_time) / 3600 + speed = self.n_docs / delay + return [ + f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)." + ] diff --git a/cc-multilingual-main/cc_net/cc_net/tools/__init__.py b/cc-multilingual-main/cc_net/cc_net/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/cc-multilingual-main/cc_net/cc_net/tools/dl_cc_100.py b/cc-multilingual-main/cc_net/cc_net/tools/dl_cc_100.py new file mode 100644 index 0000000000000000000000000000000000000000..ca06f0d63b1e298cbb0bf81e44d9244afc71290b --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/tools/dl_cc_100.py @@ -0,0 +1,206 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +import contextlib +import functools +import gzip +import logging +import multiprocessing +from collections import defaultdict +from pathlib import Path +from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple + +import cc_net +from cc_net import jsonql +from cc_net.process_wet_file import CCSegmentsReader + +# Set this to a directory to use as cache for intermediary files. +# This helps for debugging. 
+WET_CACHE = None +# WET_CACHE = Path("wet_cache") + +S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100" +VERSION = "1.0.0" + +CC_100_SNAPSHOTS = [ + "2018-05", + "2018-09", + "2018-13", + "2018-17", + "2018-22", + "2018-26", + "2018-30", + "2018-34", + "2018-39", + "2018-43", + "2018-47", + "2018-51", +] + +BIG_LANGUAGES = { + "es_XX", + "fr_XX", + "de_DE", + "ja_XX", + "ru_RU", + "zh_CN", + "en_XX", + "it_IT", + "ar_AR", + "nl_XX", + "pl_PL", + "pt_XX", + "tr_TR", + "zh_TW", +} + + +class Paragraph(NamedTuple): + lang: str + text: str + lm_score: float + + +def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]: + """ + Download metadata from a shards. + + Sample metadata: + + { + "cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz", + "digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ", + "url": "http://personals.gearplay.com/ads/DRJONES.htm", + "line_ids": [10], + "languages": ["en_XX"], + "lm_scores": [-2.658], + } + """ + snapshot = snapshot.replace("-", "_") + name = f"snap_{snapshot}_batch_{shard}.json.gz" + url = "/".join([S3_BUCKET, VERSION, name]) + shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict) + try: + cache_file: Optional[Path] = None + if WET_CACHE is not None: + cache_file = WET_CACHE / name + metadata_file = jsonql.open_remote_file(url, cache_file) + except: + logging.warning(f"Couldn't open {url}") + return + + for meta in jsonql.read_jsons(metadata_file): + shard_metadata[meta["cc_segment"]][meta["digest"]] = meta + + found_pars, missed_pars = 0, 0 + for seg, segment_metadata in shard_metadata.items(): + for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE): + if doc["digest"] not in segment_metadata: + continue + + meta = segment_metadata[doc["digest"]] + full_pars = [doc["title"]] + doc["raw_content"].split("\n") + + assert len(meta["line_ids"]) == len(meta["languages"]) + assert len(meta["line_ids"]) == len(meta["lm_scores"]) + for i, lang, score in zip( + meta["line_ids"], meta["languages"], meta["lm_scores"] + ): + if snapshot != "2018-51" and lang in BIG_LANGUAGES: + # Big languages only come from "2018-51" snapshot + continue + if i >= len(full_pars): + # This is because CC100 was created by saving only urls. + # Some urls appears in different snapshot with slightly different + # versions, but we don't know which one is correct. + # Here we read both versions, but some index may end up + # being incorrect. + # This impact ~3% documents. + missed_pars += 1 + continue + + yield Paragraph(lang, full_pars[i], score) + found_pars += 1 + if missed_pars > 0: + logging.warning( + f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes." + ) + + +def _split_by_par( + paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path +) -> int: + outdir.mkdir(exist_ok=True) + outfiles = {} + num_pars = 0 + try: + for par in paragraphes: + # MODIFY ME: filter paragraph if needed (languages, score, ...) 
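+            # A hypothetical filter sketch; the languages and threshold below are
+            # invented placeholders, tune them to your needs before uncommenting:
+            # if par.lang not in ("en_XX", "fr_XX") or par.lm_score < -4.0:
+            #     continue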
+ if par.lang not in outfiles: + (outdir / par.lang).mkdir(exist_ok=True) + outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz" + outfiles[par.lang] = gzip.open(outfile, "wt") + + print(par.text, file=outfiles[par.lang]) + num_pars += 1 + finally: + for o in outfiles.values(): + o.close() + + logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}") + return num_pars + + +def dl_shard(snapshot: str, shard: int, outdir: Path) -> int: + return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir) + + +@contextlib.contextmanager +def unordered_map(processes: int): + if processes == 0: + yield map + return + + with multiprocessing.Pool(processes) as pool: + yield pool.imap_unordered + + +def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None: + _dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir) + + with unordered_map(processes) as umap: + num_pars = sum(umap(_dl_shard, range(500))) + + logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.") + + +def dl( + snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1 +) -> None: + """ + Download CC100 corpus. + Will create one text file per language and CC snapshot. + + - snapshot: restrict to one snapshot. Useful for parallelization. + - outdir: output directory + - processes: number of processes to use + """ + if snapshot is None: + snapshots = CC_100_SNAPSHOTS + else: + snapshots = snapshot.split(",") + + invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS] + assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}" + + for snapshot in snapshots: + dl_snapshot(snapshot, outdir, processes) + + +if __name__ == "__main__": + import func_argparse + + func_argparse.single_main(dl) diff --git a/cc-multilingual-main/cc_net/cc_net/tools/expand_corpus.py b/cc-multilingual-main/cc_net/cc_net/tools/expand_corpus.py new file mode 100644 index 0000000000000000000000000000000000000000..44f3dce62a1b525d7f7469247c0e8e70cd5b5cd6 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/tools/expand_corpus.py @@ -0,0 +1,295 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +Tools to search sentences in CC similar to sentences in another corpus. +""" + +import functools +import logging +import math +import subprocess +from collections import Counter +from pathlib import Path +from typing import Iterable, List, Optional, Set, Tuple + +import func_argparse +import submitit +from kenlm import Model as KenlmModel # type: ignore +from sentence_splitter import SentenceSplitter # type: ignore +from sentencepiece import SentencePieceProcessor # type: ignore + +from cc_net import dedup, jsonql, perplexity, text_normalizer + +KENLM = Path("./bin/lmplz") +KENLM_BUILD = Path("./bin/build_binary") +VOCAB_SIZE = 2 ** 16 - 10 +PROCESSES = 16 + + +def normalize(corpus: Path, output_dir: Path) -> Path: + normalized = output_dir / (corpus.stem + ".normalized") + if normalized.exists(): + return normalized + + print("Will normalize", corpus, "to", normalized) + jsonql.run_pipes( + jsonql.Mapper(text_normalizer.normalize), + file=corpus, + output=normalized, + processes=PROCESSES, + ) + return normalized + + +# TODO use classic files directory. 
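+# Note: the paths in sp_model() and _dataset() below are cluster-specific defaults
+# and will need to be adapted to the local environment.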
+def sp_model(lang: str) -> Path: + return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model") + + +def _dataset(dataset: Optional[Path], lang: str) -> Path: + return ( + dataset + or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz" + ) + + +class SentencePiece(jsonql.Transformer): + def __init__(self, model: Path): + super().__init__() + self.model = model + self.sp: SentencePieceProcessor = None # type: ignore + + def _prepare(self): + self.sp = SentencePieceProcessor() + self.sp.load(str(self.model)) + + def do(self, line: str) -> str: + return " ".join(self.sp.encode_as_pieces(line)) + + +class ExtractSentences(jsonql.Transformer): + def __init__( + self, + sp_model: Path, + lm_model: Path, + field: str = "raw_content", + threshold: float = float("+inf"), + ): + super().__init__() + self.sp_model = sp_model + self.lm_model = lm_model + self.field = field + self.threshold = threshold + self.sp: SentencePieceProcessor = None + self.lm: KenlmModel = None + self.splitter: SentenceSplitter = None + self.hashes: Set[int] = set() + + def _prepare(self): + self.sp = SentencePieceProcessor() + self.sp.load(str(self.sp_model)) + self.splitter = SentenceSplitter("en") + self.lm = KenlmModel(str(self.lm_model)) + + def do(self, document: dict) -> Optional[str]: + content: Optional[str] = document.get(self.field) + if not content: + return None + all_sentences = [ + s for l in content.split("\n") if l for s in self.splitter.split(text=l) + ] + unique_sentences = [] + for s in all_sentences: + if not s: + continue + h = dedup.str_hash(s) + if h in self.hashes: + continue + self.hashes.add(h) + unique_sentences.append(s) + + scores = [] + for sentence in unique_sentences: + normalized = text_normalizer.normalize(sentence) + pieces = self.sp.encode_as_pieces(normalized) + log_score = self.lm.score(" ".join(pieces)) + pp = -1 + if len(pieces): + pp = perplexity.pp(log_score, len(pieces)) + scores.append(pp) + + res = filter( + lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences) + ) + return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None + + +def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path: + tokenized = output_dir / (corpus.stem + ".tokenized") + if tokenized.exists(): + return tokenized + + print("Will SentencePiece", corpus, "to", tokenized) + jsonql.run_pipes( + SentencePiece(sp_model(lang)), + file=normalize(corpus, output_dir), + output=tokenized, + processes=PROCESSES, + ) + + return tokenized + + +def train_lm( + corpus: Path, + output_dir: Path, + lang: str = "en", + vocab_size: int = VOCAB_SIZE, + ngrams: int = 5, +): + lm_text_file = output_dir / (corpus.stem + ".arpa") + lm_bin_file = output_dir / (corpus.stem + ".arpa.bin") + if lm_bin_file.exists(): + return lm_bin_file + + assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found." 
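+    # Rough pipeline: normalize the raw corpus, tokenize it with the language's
+    # SentencePiece model, train an n-gram ARPA model with kenlm's lmplz, then
+    # compile it to a binary file with build_binary for fast loading.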
+ + normalized = normalize(corpus, output_dir) + tokenized = tokenize(normalized, output_dir, lang) + + print("Will train LM", lm_text_file, "on", tokenized) + kenlm_cmd = [ + str(KENLM), + f"--order={ngrams}", + "--memory=8G", + f"--temp_prefix={jsonql._tmp_dir()}", + f"--text={tokenized}", + f"--arpa={lm_text_file}", + f"--vocab_estimate={vocab_size}", + "--discount_fallback", + ] + subprocess.run(kenlm_cmd, check=True) + print("Will create binary model", lm_bin_file, "from", lm_text_file) + subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True) + return lm_bin_file + + +def uniform_sampling_wrt_perplexity( + paragraphes: Iterable[str], + rounding: float = 100.0, + cut: float = 1000.0, + samples: int = 20, +) -> Iterable[str]: + max_samples = math.floor(cut / rounding * samples) + n = 0 + buckets = Counter([0.0]) + logging.info(f"Will sample {max_samples} sentences.") + for lines in paragraphes: + for line in lines.split("\n"): + if not line: + continue + pp = float(line[: line.find("\t")]) + pp = math.floor(pp / rounding) * rounding + if pp > cut: + continue + if buckets[pp] > samples: + continue + yield line + buckets[pp] += 1 + if buckets[pp] > samples: + logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)") + n += 1 + if n > max_samples: + return + + +def sample( + corpus: Path, + output_dir: Path, + dataset: Path = None, + n: int = 10_000, + lang: str = "en", +) -> Path: + sample_file = output_dir / (corpus.stem + ".pp_sample.tsv") + if sample_file.exists(): + return sample_file + dataset = _dataset(dataset, lang) + extractor = ExtractSentences( + sp_model(lang), train_lm(corpus, output_dir), field="raw_content" + ) + sampling = functools.partial( + uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10 + ) + + print(f"Will sample data from {dataset} to {sample_file}") + try: + jsonql.run_pipes( + extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES + ) + except Exception: + sample_file.unlink() + raise + + subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True) + subprocess.run(["head", sample_file], check=True) + return sample_file + + +def mine( + corpus: Path, + output_dir: Path, + threshold: float, + dataset: Path = None, + lang: str = "en", +) -> List[Path]: + """Search sentences in CC similar to the one in the given corpus. + + Args: + - corpus: corpus to train the LM one. Assumes one sentence per line. + - output_dir: where to store the results + - threshold: maximum perplexity to have + - dataset: glob pattern matching CC shards. 
+ - lang: search in the files of this language + """ + dataset = _dataset(dataset, lang) + files = list(dataset.parent.glob(dataset.name)) + outputs = [output_dir / (f.stem + ".tsv") for f in files] + if all(o.exists() for o in outputs): + return outputs + + n = len(outputs) + sp = [sp_model(lang)] * n + lm = [train_lm(corpus, output_dir)] * n + thresholds = [threshold] * n + + ex = submitit.AutoExecutor(output_dir / "mining_logs") + ex.update_parameters( + name="mine", + cpus_per_task=PROCESSES, + timeout_min=60 * 24 // PROCESSES, + mem_gb=10, + ) + jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds) + print("Submited job array:", jobs[0]) + + for j in submitit.helpers.as_completed(jobs): + (i, o) = j.result() + print("Mined sentences from", i, "to", o) + + return outputs + + +def _mine( + file: Path, output: Path, sp: Path, lm: Path, threshold: float +) -> Tuple[Path, Path]: + extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold) + jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES) + return (file, output) + + +if __name__ == "__main__": + func_argparse.main(sample, mine) diff --git a/cc-multilingual-main/cc_net/cc_net/tools/make_dmoz_corpus.py b/cc-multilingual-main/cc_net/cc_net/tools/make_dmoz_corpus.py new file mode 100644 index 0000000000000000000000000000000000000000..214b95eee024ca3333fd155262a1713b38829d48 --- /dev/null +++ b/cc-multilingual-main/cc_net/cc_net/tools/make_dmoz_corpus.py @@ -0,0 +1,97 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +This code is used to train a fastText classifier to label document with DMOZ categories. + +The data, distributed under the cc-by 3.0 license +(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html), +can be downloaded from +https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz. +""" + +import urllib.request +from io import StringIO +from pathlib import Path +from typing import Dict, Set +from urllib.parse import urlparse + +import func_argparse +from lxml import etree # type: ignore + +from cc_net import jsonql + +TaggedUrls = Dict[str, Set[str]] +DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz" + + +def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls): + if url in url2tags: + url2tags[url] &= tags + else: + url2tags[url] = tags + + +def load_tags(filename: Path = None) -> TaggedUrls: + if filename is None: + with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz: + tree = etree.parse(dmoz) + else: + tree = etree.parse(str(filename)) + + root = tree.getroot() + url2tags: Dict[str, Set[str]] = {} + for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"): + url = external_page.get("about") + domain = urlparse(url).netloc + for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"): + # print(url, topic.text) + # Tags looks like Top/Arts/Animation/Anime/Collectibles + tags = set(topic.text.split("/")[1:]) + add_tags(url, tags, url2tags) + add_tags(domain, tags, url2tags) + return url2tags + + +def dl(output: Path) -> None: + urllib.request.urlretrieve(DMOZ_TAGS_URL, output) + + +def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None: + """ + Loads a tags file and create a training dataset using the given webpages. 
+ + Arguments: + - file: CC shard file + - tags_file: dmoz tagging file, (like the one produced by `dl`) + - output: "" + """ + url2tags = load_tags(tags_file) + with jsonql.open_write(output) as o: + for document in jsonql.read_jsons(file): + if not document: + continue + url = document["url"] + domain = document["source_domain"] + + if url in url2tags: + tags = url2tags[url] + elif domain in url2tags: + tags = url2tags[domain] + else: + continue + + if len(tags) == 0: + continue + + fasttext_tags = ["__label__" + tag for tag in tags] + content = document["tokenized"].replace("\n", " ").lower() + if len(content) > 200: + print(" ".join(fasttext_tags), content, file=o) # type: ignore + + +if __name__ == "__main__": + func_argparse.single_main(make_corpus) diff --git a/cc-multilingual-main/cc_net/cc_net/xyz.ipynb b/cc-multilingual-main/cc_net/cc_net/xyz.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/.gitignore b/cc-multilingual-main/cc_net/third_party/sentencepiece/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c513f1577b7929c25c37b3ae4ec6685130bdc0ab --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/.gitignore @@ -0,0 +1,68 @@ +Makefile +Makefile.in +/ar-lib +/mdate-sh +/py-compile +/test-driver +/ylwrap +/build + +/autom4te.cache +/autoscan.log +/autoscan-*.log +/aclocal.m4 +/compile +/config.guess +/config.sub +/configure +/configure.scan +/depcomp +/install-sh +/missing +/stamp-h1 +/libtool +/config.h +/config.status +/autogen.sh +/ltmain.sh + +CMakeFiles +CMakeCache.txt +config.h +sentencepiece.pc + +*.o +*.lo +*.a +*.la +*.pyc + +.libs +.deps + +*.m4 +*.log +*.trs + +compile_charsmap + +spm_decode +spm_encode +spm_export_vocab +spm_train +spm_normalize +spm_test + +.DS_Store +*.egg-info/ +dist/ +*.swp +*.swo +*.pyc + +m.model +m.vocab + +cmake_install.cmake +libsentencepiece.so* +libsentencepiece_train.so* diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/.travis.yml b/cc-multilingual-main/cc_net/third_party/sentencepiece/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..417cffd0e90a22e9687c52a7ba365176262ed5c4 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/.travis.yml @@ -0,0 +1,74 @@ +language: cpp +matrix: + include: + - os: linux + env: IMAGE=ubuntu:rolling COMMAND=build_linux_gcc_coverall_ubuntu RELEASE_FILES="$TRAVIS_BUILD_DIR/build/*.xz" + services: docker + - os: linux + env: IMAGE=i386/ubuntu:rolling COMMAND=build_linux_gcc_ubuntu_i386 + services: docker + - os: linux + env: IMAGE=ubuntu:xenial COMMAND=build_linux_gcc_ubuntu_no_tf + services: docker + - os: linux + env: IMAGE=ubuntu:trusty COMMAND=build_linux_gcc_ubuntu_no_tf + services: docker + - os: linux + env: IMAGE=debian:stable COMMAND=build_linux_gcc_debian + services: docker + - os: linux + env: IMAGE=fedora:latest COMMAND=build_linux_gcc_fedora + services: docker + - os: linux + env: IMAGE=ubuntu:rolling COMMAND=build_linux_clang_ubuntu + services: docker + - os: linux + env: IMAGE=x86_64 COMMAND=make_py_wheel_py RELEASE_FILES="$TRAVIS_BUILD_DIR/python/dist/*manylinux*.whl" + script: + - $TRAVIS_BUILD_DIR/python/make_py_wheel.sh ${IMAGE} + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi + services: docker + - os: linux + env: IMAGE=i686 COMMAND=make_py_wheel_py RELEASE_FILES="$TRAVIS_BUILD_DIR/python/dist/*manylinux*.whl" + script: + - 
$TRAVIS_BUILD_DIR/python/make_py_wheel.sh ${IMAGE} + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi + services: docker + - os: linux + env: IMAGE=x86_64 COMMAND=make_py_wheel_tf RELEASE_FILES="$TRAVIS_BUILD_DIR/tensorflow/dist/*.whl" + script: + - $TRAVIS_BUILD_DIR/tensorflow/make_py_wheel.sh + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi + services: docker + - os: osx + osx_image: xcode9.4 + env: IMAGE=native COMMAND=build_osx + - os: osx + osx_image: xcode9.4 + env: IMAGE=native COMMAND=make_py_wheel_mac_py RELEASE_FILES="$TRAVIS_BUILD_DIR/python/dist/delocated_wheel/*.whl" + script: + - $TRAVIS_BUILD_DIR/python/make_py_wheel_mac.sh + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi + - os: osx + osx_image: xcode9.4 + env: IMAGE=native COMMAND=make_py_wheel_mac_tf RELEASE_FILES="$TRAVIS_BUILD_DIR/tensorflow/dist/*.whl" + script: + - $TRAVIS_BUILD_DIR/tensorflow/make_py_wheel_mac.sh + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi +script: + - $TRAVIS_BUILD_DIR/test.sh ${IMAGE} ${COMMAND} + - if [[ "$RELEASE_FILES" != "" ]]; then ls -l $RELEASE_FILES ; fi +deploy: + provider: releases + skip_cleanup: true + api_key: + secure: WnrgfoRVSoi+E2YwFDgpQlxldfYQycN8DmMqbJab6uP0FWTmPptS9nmXWVGsXJS1u+sTsx/E+lM5xggl31u88hUJYsEUg+xPszSf+eiLfmdoEY+qYj2Vsuh7cT7P1tBScVMUiEQsoCcg9gZbHFHkSYJ74gyQxQhqJ52UmCJ1aNcp3nbtzgjBGvtsi2WBUdG1jSW0qwRj9gcq9eOWA4zkeHj9QKWhBtRD7fhpUiUDWVqaDSMu1E10QLNjkZ//qwbrWXb4MBzCa1ISla/ZoKv4TMQQrzYEwqxmbX2bxk1lMkJD3sKt3Wq/qNWDYaPKk9gz/cU9nAKwzSlJzus5c9pac6U/mh0IU8JhEGlkzFb1Ng3cHLdYT0hk0jAW15Ptcijqt+UGs0Arb1pdKvQV2e5bLEBrujCNGF8NFdsE23WDofEM/VKXuMNWW/j6b+VLESf05rz5p07IBMczLfW/Qs8mY5cqR9WaqPbYxMZlgwxtD+MiKERHlq1qVdK25M1UuB0wH/EbstVuEX2iNZRvffT9A+NglriLR74vNiCnfRlzGx4U4/Z79r2mwFrJTGupgq9N/jvKMs92qrT200VRtIto3JLEd3cnlM/9Gpv39SsYKA0seHKBpyFz/pGfXkOStv+14hzmEmXIFwG1QRTeFsZIUzmvvfMuhaG8Jjhdwpfvr68= + file_glob: true + file: "${RELEASE_FILES}" + on: + branch: master + tags: true + condition: $RELEASE_FILES != "" +env: + global: + secure: J52dK8uM1haWOP5Ktz01VETiYdpyOKtnGZXcZjxEXI7RV+44/MpkSSpKFrIex1jHDodn01Tv+/otmxotaz1HOPv4DgT2gg8FbHlpvnc6+B1/dEaeCDvnd33odmARoOszP0MNFTZdlvg6zGeJwPDYFfITn1jiFBtjazu19VIbQE4D1CSKkWsMXeyH1WjTb0LEtxhYwUcFgNqDb6trArx8xlvZNrh2/j5nPgAzvmuT0JuzwcRz9swwZftKcMjaK5JooSBTydtAzgVpVMZf1q+pF0nR9VlYIY34qQLsWirBjWHGRKdkgAEEN4vEMD1BKbhkIn7TjEpWLrH3BZuJY8uXAfnxvT8KXns2fhA1EDjlP/5n2y1jXAjqCZX8o1dC2fn6qxpL1Qg1WE0n9mhOZLMpbzCpJjBumjQPPUsviggRUs4awSYv3JrYuavvXQZ9rFM634O7CLIDVmbqssVyIYMhgIqLFAWgDxTyAxt+67vUy5ONsAenMOJ6bO36pYZHWH53isCRblUD5nq6Dj6WrW9P7lQhAdhvZ+Hyt+zyVCCblDY9lAv1KetU4i9sDSNYUkQtFTPVBw8LE4JmEctuM7iC6YqeneffPzzDLsGZ70m66VT1L4MYg5h2fGbtRuQ1nPz0+k2CNibN7NegaY35d7gUosnJJF04AeOUcea4+rgQkVM= diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/CMakeLists.txt b/cc-multilingual-main/cc_net/third_party/sentencepiece/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b6a92a4a83ad763db842f29d38f56cc917ed843d --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/CMakeLists.txt @@ -0,0 +1,106 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.! + +cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +file(STRINGS "VERSION" SPM_VERSION) +message(STATUS "VERSION: ${SPM_VERSION}") +project(sentencepiece VERSION ${SPM_VERSION} LANGUAGES C CXX) + +option(SPM_ENABLE_NFKC_COMPILE "Enables NFKC compile" OFF) +option(SPM_ENABLE_SHARED "Builds shared libaries in addition to static libraries." ON) +option(SPM_BUILD_TEST "Builds test binaries." OFF) +option(SPM_COVERAGE "Runs gcov to test coverage." OFF) +option(SPM_ENABLE_TENSORFLOW_SHARED "Makes a tensorflow compatible shared file." OFF) +option(SPM_ENABLE_TCMALLOC "Enable TCMalloc if available." ON) +option(SPM_TCMALLOC_STATIC "Link static library of TCMALLOC." OFF) +option(SPM_NO_THREADLOCAL "Disable thread_local operator" OFF) +option(SPM_USE_BUILTIN_PROTOBUF "Use built-in protobuf" ON) + +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +set(prefix ${CMAKE_INSTALL_PREFIX}) +set(exec_prefix "\${prefix}") +set(libdir "\${exec_prefix}/lib") +set(includedir "\${prefix}/include") +set(GNUCXX_STD_SUPPORT_VERSION "4.3") + +if (SPM_USE_BUILTIN_PROTOBUF) + set(libprotobuf_lite "") +else() + set(libprotobuf_lite "-lprotobuf-lite") +endif() + +if (MSVC) + string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG}) + string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_MINSIZEREL ${CMAKE_CXX_FLAGS_MINSIZEREL}) + string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) + string(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELWITHDEBINFO ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}) + add_definitions("/wd4267 /wd4244 /wd4305 /Zc:strictStrings /utf-8") +endif() + +if (APPLE) + set(CMAKE_MACOSX_RPATH ON) + set(CMAKE_SKIP_BUILD_RPATH FALSE) + set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) + set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") + set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib" isSystemDir) + if ("${isSystemDir}" STREQUAL "-1") + set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") + endif() +endif() + +if (NOT DEFINED CMAKE_INSTALL_BINDIR) + set(CMAKE_INSTALL_BINDIR bin) +endif() + +if (NOT DEFINED CMAKE_INSTALL_LIBDIR) + set(CMAKE_INSTALL_LIBDIR lib) +endif() + +if (NOT DEFINED CMAKE_INSTALL_LIBDIR) + set(CMAKE_INSTALL_LIBDIR lib) +endif() + +if (NOT DEFINED CMAKE_INSTALL_INCDIR) + set(CMAKE_INSTALL_INCDIR include) +endif() + +configure_file("${PROJECT_SOURCE_DIR}/config.h.in" "config.h") +configure_file("${PROJECT_SOURCE_DIR}/sentencepiece.pc.in" "sentencepiece.pc" @ONLY) + +if (NOT MSVC) + install(FILES "${CMAKE_BINARY_DIR}/sentencepiece.pc" DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) +endif() + +include_directories(${CMAKE_SOURCE_DIR} ${PROJECT_BINARY_DIR}) + +if (SPM_BUILD_TEST) + enable_testing() +endif() + +add_subdirectory(src) +add_subdirectory(third_party) + +set(CPACK_SOURCE_GENERATOR "TXZ") +set(CPACK_GENERATOR "7Z") +set(CPACK_PACKAGE_VERSION "${SPM_VERSION}") +set(CPACK_STRIP_FILES TRUE) +set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/LICENSE") +set(CPACK_RESOURCE_FILE_README "${PROJECT_SOURCE_DIR}/README.md") +set(CPACK_PACKAGE_CONTACT "taku@google.com") 
+set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Taku Kudo") +set(CPACK_SOURCE_IGNORE_FILES "/build/;/.git/;/dist/;/sdist/;~$;${CPACK_SOURCE_IGNORE_FILES}") +include(CPack) diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/CONTRIBUTING.md b/cc-multilingual-main/cc_net/third_party/sentencepiece/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..d0b993c87b9ab3adc10d60c48e25d0c9410f0c34 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/CONTRIBUTING.md @@ -0,0 +1,24 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance, that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up-front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use Github pull requests for this purpose. + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the [Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate). diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/LICENSE b/cc-multilingual-main/cc_net/third_party/sentencepiece/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/README.md b/cc-multilingual-main/cc_net/third_party/sentencepiece/README.md new file mode 100644 index 0000000000000000000000000000000000000000..706e2f3bbc128dd01e1c0b7e6a89a7dff7291c72 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/README.md @@ -0,0 +1,268 @@ +# SentencePiece + +[![Build Status](https://travis-ci.org/google/sentencepiece.svg?branch=master)](https://travis-ci.org/google/sentencepiece) +[![Build status](https://ci.appveyor.com/api/projects/status/vxoub3qx4fwpysyq?svg=true)](https://ci.appveyor.com/project/taku910/sentencepiece) +[![Coverage Status](https://coveralls.io/repos/github/google/sentencepiece/badge.svg?branch=master)](https://coveralls.io/github/google/sentencepiece?branch=master) +[![GitHub Issues](https://img.shields.io/github/issues/google/sentencepiece.svg)](https://github.com/google/sentencepiece/issues) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/5851945fc54947fc9e964f78c3b6bdfa)](https://app.codacy.com/app/taku910/sentencepiece?utm_source=github.com&utm_medium=referral&utm_content=google/sentencepiece&utm_campaign=Badge_Grade_Dashboard) +[![PyPI version](https://badge.fury.io/py/sentencepiece.svg)](https://badge.fury.io/py/sentencepiece) +[![Contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) + +SentencePiece is an unsupervised text tokenizer and detokenizer mainly for +Neural Network-based text generation systems where the vocabulary size +is predetermined prior to the neural model training. SentencePiece implements +**subword units** (e.g., **byte-pair-encoding (BPE)** [[Sennrich et al.](http://www.aclweb.org/anthology/P16-1162)]) and +**unigram language model** [[Kudo.](https://arxiv.org/abs/1804.10959)]) +with the extension of direct training from raw sentences. SentencePiece allows us to make a purely end-to-end system that does not depend on language-specific pre/postprocessing. + +**This is not an official Google product.** + +## Technical highlights +- **Purely data driven**: SentencePiece trains tokenization and detokenization + models from sentences. Pre-tokenization ([Moses tokenizer](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl)/[MeCab](http://taku910.github.io/mecab/)/[KyTea](http://www.phontron.com/kytea/)) is not always required. +- **Language independent**: SentencePiece treats the sentences just as sequences of Unicode characters. There is no language-dependent logic. +- **Multiple subword algorithms**: **BPE** [[Sennrich et al.](http://www.aclweb.org/anthology/P16-1162)] and **unigram language model** [[Kudo.](https://arxiv.org/abs/1804.10959)] are supported. +- **Subword regularization**: SentencePiece implements subword sampling for [subword regularization](https://arxiv.org/abs/1804.10959) which helps to improve the robustness and accuracy of NMT models. +- **Fast and lightweight**: Segmentation speed is around 50k sentences/sec, and memory footprint is around 6MB. +- **Self-contained**: The same tokenization/detokenization is obtained as long as the same model file is used. +- **Direct vocabulary id generation**: SentencePiece manages vocabulary to id mapping and can directly generate vocabulary id sequences from raw sentences. 
+- **NFKC-based normalization**: SentencePiece performs NFKC-based text normalization.
+
+## Comparisons with other implementations
+|Feature|SentencePiece|[subword-nmt](https://github.com/rsennrich/subword-nmt)|[WordPiece](https://arxiv.org/pdf/1609.08144.pdf)|
+|:---|:---:|:---:|:---:|
+|Supported algorithm|BPE, unigram, char, word|BPE|BPE*|
+|OSS?|Yes|Yes|Google internal|
+|Subword regularization|[Yes](#subword-regularization)|No|No|
+|Python Library (pip)|[Yes](python/README.md)|No|N/A|
+|C++ Library|[Yes](doc/api.md)|No|N/A|
+|Pre-segmentation required?|[No](#whitespace-is-treated-as-a-basic-symbol)|Yes|Yes|
+|Customizable normalization (e.g., NFKC)|[Yes](doc/normalization.md)|No|N/A|
+|Direct id generation|[Yes](#end-to-end-example)|No|N/A|
+
+Note that the BPE algorithm used in WordPiece is slightly different from the original BPE.
+
+## Overview
+### What is SentencePiece?
+SentencePiece is a re-implementation of **sub-word units**, an effective way to alleviate the open vocabulary problem in neural machine translation. SentencePiece supports two segmentation algorithms, **byte-pair-encoding (BPE)** [[Sennrich et al.](http://www.aclweb.org/anthology/P16-1162)] and **unigram language model** [[Kudo.](https://arxiv.org/abs/1804.10959)]. Here are the high-level differences from other implementations.
+
+#### The number of unique tokens is predetermined
+Neural Machine Translation models typically operate with a fixed vocabulary. Unlike most unsupervised word segmentation algorithms, which assume an infinite vocabulary, SentencePiece trains the segmentation model such that the final vocabulary size is fixed, e.g., 8k, 16k, or 32k.
+
+Note that SentencePiece specifies the final vocabulary size for training, unlike [subword-nmt](https://github.com/rsennrich/subword-nmt), which uses the number of merge operations. The number of merge operations is a BPE-specific parameter and is not applicable to other segmentation algorithms, including unigram, word and character.
+
+#### Trains from raw sentences
+Previous sub-word implementations assume that the input sentences are pre-tokenized. This constraint was required for efficient training, but it complicates preprocessing because we have to run language-dependent tokenizers in advance. The implementation of SentencePiece is fast enough to train the model from raw sentences. This is useful for training the tokenizer and detokenizer for Chinese, Japanese and Korean, where no explicit spaces exist between words.
+
+#### Whitespace is treated as a basic symbol
+The first step of natural language processing is text tokenization. For example, a standard English tokenizer would segment the text "Hello world." into the following three tokens.
+
+> [Hello] [World] [.]
+
+One observation is that the original input and the tokenized sequence are **NOT reversibly convertible**. For instance, the information that there is no space between "World" and "." is dropped from the tokenized sequence, since e.g., `Tokenize("World.") == Tokenize("World .")`.
+
+SentencePiece treats the input text just as a sequence of Unicode characters. Whitespace is also handled as a normal symbol. To handle whitespace as a basic token explicitly, SentencePiece first escapes it with the meta symbol "▁" (U+2581), as follows.
+
+> Hello▁World.
+
+Then, this text is segmented into small pieces, for example:
+
+> [Hello] [▁Wor] [ld] [.]
+
+Since the whitespace is preserved in the segmented text, we can detokenize the text without any ambiguities.
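+As a concrete illustration, here is a minimal round-trip sketch using the Python wrapper (a sketch only: it assumes the `sentencepiece` pip package and an already trained model file, named `m.model` here purely as a placeholder):
+
+```
+import sentencepiece as spm
+
+sp = spm.SentencePieceProcessor()
+sp.Load('m.model')  # placeholder name; any trained SentencePiece model works
+
+pieces = sp.EncodeAsPieces('Hello World.')  # e.g. ['▁Hello', '▁World', '.'] depending on the model
+restored = sp.DecodePieces(pieces)
+print(restored)  # prints 'Hello World.'; the original text is recovered (up to NFKC normalization)
+```
+
+Equivalently, because whitespace is encoded as "▁", detokenization reduces to the one-line rule shown below.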
+
+```
+detokenized = ''.join(pieces).replace('▁', ' ')
+```
+
+This feature makes it possible to perform detokenization without relying on language-specific resources.
+
+Note that we cannot apply the same lossless conversions when splitting the sentence with standard word segmenters, since they treat the whitespace as a special symbol. Tokenized sequences do not preserve the necessary information to restore the original sentence.
+
+* (en) Hello world. → [Hello] [World] [.] \(A space between Hello and World\)
+* (ja) こんにちは世界。 → [こんにちは] [世界] [。] \(No space between こんにちは and 世界\)
+
+#### Subword regularization
+Subword regularization [[Kudo.](https://arxiv.org/abs/1804.10959)] is a simple regularization method that virtually augments the training data with on-the-fly subword sampling, which helps to improve the accuracy as well as the robustness of NMT models.
+
+To enable subword regularization, you need to integrate the SentencePiece library ([C++](doc/api.md#sampling-subword-regularization)/[Python](python/README.md)) into the NMT system so that one segmentation is sampled for each parameter update, which is different from the standard off-line data preparation. Here is an example with the [Python library](python/README.md). You can see that 'New York' is segmented differently on each ``SampleEncode`` call. The details of the sampling parameters are found in [sentencepiece_processor.h](src/sentencepiece_processor.h).
+
+```
+>>> import sentencepiece as spm
+>>> s = spm.SentencePieceProcessor()
+>>> s.Load('spm.model')
+>>> for n in range(5):
+...     s.SampleEncodeAsPieces('New York', -1, 0.1)
+...
+['▁', 'N', 'e', 'w', '▁York']
+['▁', 'New', '▁York']
+['▁', 'New', '▁Y', 'o', 'r', 'k']
+['▁', 'New', '▁York']
+['▁', 'New', '▁York']
+```
+
+## Installation
+
+### Python module
+SentencePiece provides a Python wrapper that supports both SentencePiece training and segmentation.
+You can install the Python binary package of SentencePiece with:
+
+```
+% pip install sentencepiece
+```
+
+For more details, see [Python module](python/README.md).
+
+### C++ (from source)
+The following tools and libraries are required to build SentencePiece:
+
+* [cmake](https://cmake.org/)
+* C++11 compiler
+* [gperftools](https://github.com/gperftools/gperftools) library (optional; a 10-40% performance improvement can be obtained)
+
+On Ubuntu, the build tools can be installed with apt-get:
+```
+% sudo apt-get install cmake build-essential pkg-config libgoogle-perftools-dev
+```
+#### Build and Install SentencePiece
+```
+% cd /path/to/sentencepiece
+% mkdir build
+% cd build
+% cmake ..
+% make -j $(nproc)
+% sudo make install
+% sudo ldconfig -v
+```
+On OSX/macOS, replace the last command with `sudo update_dyld_shared_cache`.
+
+### TensorFlow module
+See [tensorflow/README.md](tensorflow/README.md)
+
+## Usage instructions
+### Train SentencePiece Model
+```
+% spm_train --input=<input> --model_prefix=<model_name> --vocab_size=8000 --character_coverage=1.0 --model_type=<type>
+```
+* `--input`: one-sentence-per-line **raw** corpus file. There is no need to run a tokenizer, normalizer or preprocessor. By default, SentencePiece normalizes the input with Unicode NFKC. You can pass a comma-separated list of files.
+* `--model_prefix`: output model name prefix. `<model_name>.model` and `<model_name>.vocab` are generated.
+* `--vocab_size`: vocabulary size, e.g., 8000, 16000, or 32000
+* `--character_coverage`: amount of characters covered by the model; good defaults are `0.9995` for languages with a rich character set like Japanese or Chinese, and `1.0` for other languages with a small character set.
+* `--model_type`: model type. Choose from `unigram` (default), `bpe`, `char`, or `word`. The input sentence must be pretokenized when using the `word` type.
+
+Use the `--help` flag to display all parameters for training.
+
+### Encode raw text into sentence pieces/ids
+```
+% spm_encode --model=<model_file> --output_format=piece < input > output
+% spm_encode --model=<model_file> --output_format=id < input > output
+```
+
+Use the `--extra_options` flag to insert the BOS/EOS markers or reverse the input sequence.
+```
+% spm_encode --extra_options=eos (add </s> only)
+% spm_encode --extra_options=bos:eos (add <s> and </s>)
+% spm_encode --extra_options=reverse:bos:eos (reverse input and add <s> and </s>)
+```
+
+SentencePiece supports nbest segmentation and segmentation sampling with the `--output_format=(nbest|sample)_(piece|id)` flags.
+```
+% spm_encode --model=<model_file> --output_format=sample_piece --nbest_size=-1 --alpha=0.5 < input > output
+% spm_encode --model=<model_file> --output_format=nbest_id --nbest_size=10 < input > output
+```
+
+### Decode sentence pieces/ids into raw text
+```
+% spm_decode --model=<model_file> --input_format=piece < input > output
+% spm_decode --model=<model_file> --input_format=id < input > output
+```
+Use the `--extra_options` flag to decode the text in reverse order.
+```
+% spm_decode --extra_options=reverse < input > output
+```
+
+### End-to-End Example
+```
+% spm_train --input=data/botchan.txt --model_prefix=m --vocab_size=1000
+unigram_model_trainer.cc(494) LOG(INFO) Starts training with :
+input: "../data/botchan.txt"
+...
+unigram_model_trainer.cc(529) LOG(INFO) EM sub_iter=1 size=1100 obj=10.4973 num_tokens=37630 num_tokens/piece=34.2091
+trainer_interface.cc(272) LOG(INFO) Saving model: m.model
+trainer_interface.cc(281) LOG(INFO) Saving vocabs: m.vocab
+
+% echo "I saw a girl with a telescope." | spm_encode --model=m.model
+▁I ▁saw ▁a ▁girl ▁with ▁a ▁ te le s c o pe .
+
+% echo "I saw a girl with a telescope." | spm_encode --model=m.model --output_format=id
+9 459 11 939 44 11 4 142 82 8 28 21 132 6
+
+% echo "9 459 11 939 44 11 4 142 82 8 28 21 132 6" | spm_decode --model=m.model --input_format=id
+I saw a girl with a telescope.
+```
+You can see that the original input sentence is restored from the vocabulary id sequence.
+
+### Export vocabulary list
+```
+% spm_export_vocab --model=<model_file> --output=<output file>
+```
+The output file stores a list of vocabulary entries and their emission log probabilities. The vocabulary id corresponds to the line number in this file.
+
+### Redefine special meta tokens
+By default, SentencePiece uses Unknown (<unk>), BOS (<s>) and EOS (</s>) tokens, which have ids 0, 1, and 2, respectively. We can redefine this mapping in the training phase as follows.
+
+```
+% spm_train --bos_id=0 --eos_id=1 --unk_id=5 --input=... --model_prefix=... --character_coverage=...
+```
+Setting an id to -1 (e.g., ```bos_id=-1```) disables that special token. Note that the unknown id cannot be disabled. We can define an id for padding (<pad>) with ```--pad_id=3```.
+
+If you want to assign other special tokens, please see [Use custom symbols](doc/special_symbols.md).
+
+### Vocabulary restriction
+```spm_encode``` accepts a ```--vocabulary``` and a ```--vocabulary_threshold``` option so that ```spm_encode``` will only produce symbols which also appear in the vocabulary (with at least some frequency). The background of this feature is described in the [subword-nmt page](https://github.com/rsennrich/subword-nmt#best-practice-advice-for-byte-pair-encoding-in-nmt).
+
+The usage is basically the same as that of ```subword-nmt```.
Assuming that L1 and L2 are the two languages (source/target languages), train the shared spm model, and get resulting vocabulary for each: + +``` +% cat {train_file}.L1 {train_file}.L2 | shuffle > train +% spm_train --input=train --model_prefix=spm --vocab_size=8000 --character_coverage=0.9995 +% spm_encode --model=spm.model --generate_vocabulary < {train_file}.L1 > {vocab_file}.L1 +% spm_encode --model=spm.model --generate_vocabulary < {train_file}.L2 > {vocab_file}.L2 +``` + +```shuffle``` command is used just in case because ```spm_train``` loads the first 10M lines of corpus by default. + + +Then segment train/test corpus with ```--vocabulary``` option +``` +% spm_encode --model=spm.model --vocabulary={vocab_file}.L1 --vocabulary_threshold=50 < {test_file}.L1 > {test_file}.seg.L1 +% spm_encode --model=spm.model --vocabulary={vocab_file}.L2 --vocabulary_threshold=50 < {test_file}.L2 > {test_file}.seg.L2 +``` + +## Advanced topics + +* [SentencePiece Experiments](doc/experiments.md) +* [SentencePieceProcessor C++ API](doc/api.md) +* [Use custom text normalization rules](doc/normalization.md) +* [Use custom symbols](doc/special_symbols.md) +* [Python Module](python/README.md) +* [TensorFlow Module](tensorflow/README.md) +* [Segmentation and training algorithms in detail] diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/VERSION b/cc-multilingual-main/cc_net/third_party/sentencepiece/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..660b5ae0c0d81dcbbdf060cece159df3b114d1c1 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/VERSION @@ -0,0 +1 @@ +0.1.83 diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/appveyor.yml b/cc-multilingual-main/cc_net/third_party/sentencepiece/appveyor.yml new file mode 100644 index 0000000000000000000000000000000000000000..bffcf834405c07ca22ba916804863cc67f732cb4 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/appveyor.yml @@ -0,0 +1,24 @@ +version: '{branch} build {build}' +image: Visual Studio 2015 +platform: + - x64 + - Win32 +configuration: Release +clone_depth: 50 +clone_folder: c:\projects\sentencepiece +init: +build_script: +- cmd: call test.bat %platform% +artifacts: + - path: build\sentencepiece*.7z + - path: python\dist\*.whl +deploy: + description: 'SentencePiece Windows release' + provider: GitHub + auth_token: + secure: Aq4jHo/HY6WFFKs1h9cCWfi3U4ZsVTooUEhtgBfcJM6SUhnZdPVazIcKCtiR32kc + draft: false + prerelease: false + on: + branch: master + appveyor_repo_tag: true diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/config.h.in b/cc-multilingual-main/cc_net/third_party/sentencepiece/config.h.in new file mode 100644 index 0000000000000000000000000000000000000000..0413fea7adcf7b67417c040c201c55bf1b9b7434 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/config.h.in @@ -0,0 +1,9 @@ +#ifndef CONFIG_H_ +#define CONFIG_H_ + +#define VERSION "@PROJECT_VERSION@" +#define PACKAGE "@PROJECT_NAME@" +#define PACKAGE_STRING "@PROJECT_NAME@" + + +#endif // CONFIG_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/test.bat b/cc-multilingual-main/cc_net/third_party/sentencepiece/test.bat new file mode 100644 index 0000000000000000000000000000000000000000..384bb80f017f4b304fefa4f46ac180c33406ebe7 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/test.bat @@ -0,0 +1,36 @@ +set PLATFORM=%1 +if "%PLATFORM%"=="" set PLATFORM=x64 +set PLATFORM_PREFIX= +if "%PLATFORM%"=="x64" set 
PLATFORM_PREFIX=-x64 +set _CL_=/utf-8 +set PATH=c:\Program Files\Git\usr\bin;c:\MinGW\bin;%PATH% +set CURRENT_PATH=%~dp0 +set LIBRARY_PATH=%CURRENT_PATH%build\root + +mkdir build +cd build + +cmake .. -A %PLATFORM% -DSPM_BUILD_TEST=ON -DSPM_ENABLE_SHARED=OFF -DCMAKE_INSTALL_PREFIX=%LIBRARY_PATH% +cmake --build . --config Release --target install || goto :error +ctest -C Release || goto :error +cpack || goto :error + +cd ..\python +rem call :BuildPython C:\Python27%PLATFORM_PREFIX% +call :BuildPython C:\Python35%PLATFORM_PREFIX% +call :BuildPython C:\Python36%PLATFORM_PREFIX% +call :BuildPython C:\Python37%PLATFORM_PREFIX% +c:\Python37%PLATFORM_PREFIX%\python setup.py sdist || goto :error +exit + +:BuildPython +%1\python -m pip install wheel || goto :error +%1\python setup.py build || goto :error +%1\python setup.py bdist_wheel || goto :error +%1\python setup.py test || goto :error +rmdir /Q /S build +del /S *.pyd +exit /b + +:error +exit /b %errorlevel% diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/test.sh b/cc-multilingual-main/cc_net/third_party/sentencepiece/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..b1c3e03af1aebc9b02474dc004802ddf9956ee8e --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/test.sh @@ -0,0 +1,171 @@ +#!/bin/sh + +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.! + +set -e # exit immediately on error +set -x # display all commands + +setup_ubuntu() { + apt-get update + apt-get install -y build-essential cmake git pkg-config python-pip python3-pip + + . /etc/os-release + if [ "${VERSION_ID}" = "14.04" ]; then + apt-get install -y cmake3 python-dev + fi +} + +setup_debian() { + setup_ubuntu +} + +setup_fedora() { + dnf update -y + dnf install -y rpm-build gcc-c++ make cmake pkg-config python-pip python-devel +} + +build_generic() { + mkdir -p build + cd build + cmake .. -DSPM_BUILD_TEST=ON + make -j2 + make test + make package_source + cd .. +} + +build_python() { + cd build + make install + cd .. + export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH + export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig + ldconfig -v + cd python + python setup.py test + cd .. +} + +build_tensorflow() { + cd tensorflow + pip install tensorflow + python setup.py bdist_wheel + python setup.py sdist + python setup.py test + cd .. +} + +build_linux_gcc_coverall_ubuntu() { + setup_debian + apt-get install -y lcov + pip install cpp-coveralls + pip install 'requests[security]' + build_generic + build_python + build_tensorflow + mkdir -p build + cd build + cmake .. -DSPM_COVERAGE=ON + make -j2 + make coverage + coveralls --exclude-pattern '.*(include|usr|test|third_party|pb|_main).*' --gcov-options '\-lp' --gcov gcov + cd .. 
+} + +build_linux_gcc_ubuntu() { + setup_ubuntu + build_generic + build_python + build_tensorflow +} + +build_linux_gcc_ubuntu_no_tf() { + setup_ubuntu + build_generic + build_python +} + +build_linux_gcc_ubuntu_i386() { + setup_ubuntu + build_generic + build_python +} + +build_linux_gcc_debian() { + setup_debian + build_generic + build_python + build_tensorflow +} + +build_linux_gcc_fedora() { + setup_fedora + build_generic + build_python + build_tensorflow +} + +build_linux_clang_ubuntu() { + setup_ubuntu +# for v in 3.9 4.0 5.0 6.0; do + for v in 6.0; do + apt-get install -y clang-${v} + export CXX="clang++-${v}" CC="clang-${v}" + build_generic + rm -fr build + done +} + +build_osx() { + brew update + brew install protobuf || brew link --overwrite protobuf + brew link --overwrite python@2 + build_generic + cd build + make install + cd .. + cd python + # Test default Python + python setup.py test + python setup.py clean + # Test Python2 + /usr/local/bin/python setup.py test + /usr/local/bin/python setup.py clean + # Upgrade to Python3 + brew upgrade python || true + /usr/local/bin/python3 setup.py test + /usr/local/bin/python3 setup.py clean + cd .. +} + +run_docker() { + docker pull "$1" + docker run -e COVERALLS_REPO_TOKEN=${COVERALLS_REPO_TOKEN} --rm -ti --name travis-ci -v `pwd`:/sentencepiece -w /sentencepiece -td "$1" /bin/bash + docker exec travis-ci bash -c "./test.sh native $2" + docker stop travis-ci +} + +## main +if [ "$#" -ne 2 ]; then + echo "sh test.sh ." + echo "when is native, runs command natively without docker." + exit +fi + +if [ "$1" = "native" ]; then + eval "$2" +else + run_docker $1 $2 +fi diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_table_driven_lite.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_table_driven_lite.cc new file mode 100644 index 0000000000000000000000000000000000000000..961329f3679a45a95d0d97705fd483c19248d1ba --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_table_driven_lite.cc @@ -0,0 +1,109 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include + +#include +#include +#include +#include +#include + +namespace google { +namespace protobuf { +namespace internal { + +namespace { + +string* MutableUnknownFields(MessageLite* msg, int64 arena_offset) { + return Raw(msg, arena_offset) + ->mutable_unknown_fields(); +} + +struct UnknownFieldHandlerLite { + static bool Skip(MessageLite* msg, const ParseTable& table, + io::CodedInputStream* input, + int tag) { + GOOGLE_DCHECK(!table.unknown_field_set); + ::google::protobuf::io::StringOutputStream unknown_fields_string( + MutableUnknownFields(msg, table.arena_offset)); + ::google::protobuf::io::CodedOutputStream unknown_fields_stream( + &unknown_fields_string, false); + + return ::google::protobuf::internal::WireFormatLite::SkipField( + input, tag, &unknown_fields_stream); + } + + static void Varint(MessageLite* msg, const ParseTable& table, + int tag, int value) { + GOOGLE_DCHECK(!table.unknown_field_set); + + ::google::protobuf::io::StringOutputStream unknown_fields_string( + MutableUnknownFields(msg, table.arena_offset)); + ::google::protobuf::io::CodedOutputStream unknown_fields_stream( + &unknown_fields_string, false); + unknown_fields_stream.WriteVarint32(tag); + unknown_fields_stream.WriteVarint32(value); + } + + static bool ParseExtension( + MessageLite* msg, const ParseTable& table, + io::CodedInputStream* input, int tag) { + ExtensionSet* extensions = GetExtensionSet(msg, table.extension_offset); + if (extensions == NULL) { + return false; + } + + const MessageLite* prototype = table.default_instance(); + + GOOGLE_DCHECK(!table.unknown_field_set); + ::google::protobuf::io::StringOutputStream unknown_fields_string( + MutableUnknownFields(msg, table.arena_offset)); + ::google::protobuf::io::CodedOutputStream unknown_fields_stream( + &unknown_fields_string, false); + return extensions->ParseField( + tag, input, prototype, &unknown_fields_stream); + } +}; + +} // namespace + +bool MergePartialFromCodedStreamLite( + MessageLite* msg, const ParseTable& table, io::CodedInputStream* input) { + return MergePartialFromCodedStreamImpl( + msg, table, input); +} + +} // namespace internal +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/repeated_field.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/repeated_field.cc new file mode 100644 index 0000000000000000000000000000000000000000..310000aabb65f3d20c61a0020bb5b22db53fb4c5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/repeated_field.cc @@ -0,0 +1,126 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. + +#include + +#include +#include +#include + +namespace google { +namespace protobuf { + +namespace internal { + +void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) { + int new_size = current_size_ + extend_amount; + if (total_size_ >= new_size) { + // N.B.: rep_ is non-NULL because extend_amount is always > 0, hence + // total_size must be non-zero since it is lower-bounded by new_size. 
+ return &rep_->elements[current_size_]; + } + Rep* old_rep = rep_; + Arena* arena = GetArenaNoVirtual(); + new_size = std::max(kMinRepeatedFieldAllocationSize, + std::max(total_size_ * 2, new_size)); + GOOGLE_CHECK_LE(new_size, + (std::numeric_limits::max() - kRepHeaderSize) / + sizeof(old_rep->elements[0])) + << "Requested size is too large to fit into size_t."; + size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size; + if (arena == NULL) { + rep_ = reinterpret_cast(::operator new(bytes)); + } else { + rep_ = reinterpret_cast( + ::google::protobuf::Arena::CreateArray(arena, bytes)); + } +#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) + const int old_total_size = total_size_; +#endif + total_size_ = new_size; + if (old_rep && old_rep->allocated_size > 0) { + memcpy(rep_->elements, old_rep->elements, + old_rep->allocated_size * sizeof(rep_->elements[0])); + rep_->allocated_size = old_rep->allocated_size; + } else { + rep_->allocated_size = 0; + } + if (arena == NULL) { +#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) + const size_t old_size = + old_total_size * sizeof(rep_->elements[0]) + kRepHeaderSize; + ::operator delete(static_cast(old_rep), old_size); +#else + ::operator delete(static_cast(old_rep)); +#endif + } + return &rep_->elements[current_size_]; +} + +void RepeatedPtrFieldBase::Reserve(int new_size) { + if (new_size > current_size_) { + InternalExtend(new_size - current_size_); + } +} + +void RepeatedPtrFieldBase::CloseGap(int start, int num) { + if (rep_ == NULL) return; + // Close up a gap of "num" elements starting at offset "start". + for (int i = start + num; i < rep_->allocated_size; ++i) + rep_->elements[i - num] = rep_->elements[i]; + current_size_ -= num; + rep_->allocated_size -= num; +} + +google::protobuf::MessageLite* RepeatedPtrFieldBase::AddWeak( + const google::protobuf::MessageLite* prototype) { + if (rep_ != NULL && current_size_ < rep_->allocated_size) { + return reinterpret_cast( + rep_->elements[current_size_++]); + } + if (!rep_ || rep_->allocated_size == total_size_) { + Reserve(total_size_ + 1); + } + ++rep_->allocated_size; + google::protobuf::MessageLite* result = prototype ? prototype->New(arena_) : + Arena::CreateMessage(arena_); + rep_->elements[current_size_++] = result; + return result; +} + +} // namespace internal + + +} // namespace protobuf +} // namespace google