diff --git a/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4966750d8ec9e99a6f28b9975d27b03bfc39fc85 --- /dev/null +++ b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8289f624651c26b48ff5a21d447fab47c63817b5433b361bf73f3038133ac12f +size 50332843 diff --git a/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..61c88d7b892bf16641f0a2134e84deda51ba59fa --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ba335e43797248a530e3eb0108f602b6d515f1c55ae0ec0bbc572f8f75e4cf8 +size 9372 diff --git a/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..bd2acca9c9de8e69e6d5492365ed3234f400d212 --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af4d642574c5598700c678b71beb624e99a140212daab615bc5b243295baa3cb +size 9387 diff --git a/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..7aa60f4f07a1156b1a452bd5298a8ab871b67171 --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:953100e3eaba559ce7382929b9d52d98680a68054aeb16930fdc0278b8db52af +size 9293 diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5090088512e66177dbc490880a6a1cb059b8a2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deb2c0397757ae746e2dcb88c787e76f49ea507b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0213711008698de4f73418ba4a9e7418c8100c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5993a7907046cca1fe36006c1aa95a2082d305b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42582ac0f519e942fb9a7058932cdf978f8da4f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9fae859c3a7376e2f42725840e42a74a1fd4eeb Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f119ccbcbe0ff230d788fa147d543a427dd61c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7a3aae18fe39979996cd110a818750da8f98cbe Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae6e5499c566f7b068d7b58468a69cd2311f40cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffe4a950a3c60b8a0f7e74a26d311c8a977f97eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94ec33bb413b32ae6cd7e06c9ed0052c0890561a Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1069427d34096009fa174ebce2718b0004124cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc differ 
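Note: the ckpts/universal/global_step20/zero/.../exp_avg.pt, exp_avg_sq.pt, and fp32.pt entries earlier in this patch are Git LFS pointer files (version / oid / size) standing in for per-parameter optimizer-state blobs of a universal checkpoint. The sketch below shows how such a blob could be inspected; it assumes the real payloads have been fetched with `git lfs pull` and that each .pt file deserializes with torch.load into a tensor or a small dict of tensors, which is an assumption about the checkpoint layout rather than something this patch guarantees.

# Hypothetical inspection of one checkpoint blob referenced above.
# Assumes `git lfs pull` has materialized the .pt payloads and that each file
# is a torch-serialized tensor or small dict (illustrative assumption).
import torch

path = "ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt"
obj = torch.load(path, map_location="cpu")  # keep everything on CPU for inspection

if torch.is_tensor(obj):
    print(path, tuple(obj.shape), obj.dtype)
else:  # e.g. a dict of tensors / metadata
    for key, value in obj.items():
        print(key, tuple(value.shape) if torch.is_tensor(value) else type(value))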
diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acc899fd91929c46857c1944692c25a29a92b60f Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1ae75683c6b1a8877ce252f726490fd17fd6144 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cff2952e140a77102020cef50131c0d146bddd2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa248027020e673ba54f585c468339435f52c8e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405601b90665bd4f82cc8c104ae3de8820cb31d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc78107a86949b78bebd0aaa25e8b2601edca729 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e487646e664b58213367cd031f75023999de718c Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfb8ad3464d21a99af45bafacb3f6389725a22d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b31a7257e5dcce61cd09a28d74243166bcd5b52 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e72480ae7827b0c4ff18a05edf235e85ca8aa420 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8379df8d4896ba80426d2a2985f3d01644bbdd97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5c318fbbf36e97b98f62d663bf1ac6debd54f59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__init__.py b/venv/lib/python3.10/site-packages/datasets/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..905e753955a348a8e486302e1b6f5e8f53ec7bf4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/__init__.py @@ -0,0 +1,13 @@ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseDatasetsCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..364ee27c5797fc80f8122500977a4f4c65dab438 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ce80379ae1bc02bfbb657fcb6762cd70c889da9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49ae416d1b393c6746ef3e60dd89ca64dda4166f Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..467521ea554254fc94eb348473ceb42d21f02399 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06b0da48a68dcc962a295b0a7502e17e9d7e5303 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/commands/convert.py b/venv/lib/python3.10/site-packages/datasets/commands/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..f50d6aae5ba2e5c8b3c9766fa639c68ba87b2988 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/convert.py @@ -0,0 +1,195 @@ +import os +import re +import shutil +from argparse import ArgumentParser, Namespace + +from datasets.commands import BaseDatasetsCLICommand +from datasets.utils.logging import get_logger + + +HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """ + +HIGHLIGHT_MESSAGE_POST = """======= +>>>>>>> +""" + +TO_HIGHLIGHT = [ + "TextEncoderConfig", + "ByteTextEncoder", + "SubwordTextEncoder", + "encoder_config", + "maybe_build_from_corpus", + "manual_dir", +] + +TO_CONVERT = [ + # (pattern, replacement) + # Order is important here for some replacements + (r"tfds\.core", r"datasets"), + (r"tf\.io\.gfile\.GFile", r"open"), + (r"tf\.([\w\d]+)", r"datasets.Value('\1')"), + (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"), + (r"tfds\.features\.Text\(", r"datasets.Value('string'),"), + (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("), + (r"tfds\.features\.FeaturesDict\(", r"dict("), + (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), + (r"tfds\.", r"datasets."), + (r"dl_manager\.manual_dir", r"self.config.data_dir"), + (r"self\.builder_config", r"self.config"), +] + + +def convert_command_factory(args: Namespace): + """ + Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. + + Returns: ConvertCommand + """ + return ConvertCommand(args.tfds_path, args.datasets_directory) + + +class ConvertCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + """ + Register this command to argparse so it's available for the datasets-cli + + Args: + parser: Root parser to register command-specific arguments + """ + train_parser = parser.add_parser( + "convert", + help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", + ) + train_parser.add_argument( + "--tfds_path", + type=str, + required=True, + help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", + ) + train_parser.add_argument( + "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." 
+ ) + train_parser.set_defaults(func=convert_command_factory) + + def __init__(self, tfds_path: str, datasets_directory: str, *args): + self._logger = get_logger("datasets-cli/converting") + + self._tfds_path = tfds_path + self._datasets_directory = datasets_directory + + def run(self): + if os.path.isdir(self._tfds_path): + abs_tfds_path = os.path.abspath(self._tfds_path) + elif os.path.isfile(self._tfds_path): + abs_tfds_path = os.path.dirname(self._tfds_path) + else: + raise ValueError("--tfds_path is neither a directory nor a file. Please check path.") + + abs_datasets_path = os.path.abspath(self._datasets_directory) + + self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}") + + utils_files = [] + with_manual_update = [] + imports_to_builder_map = {} + + if os.path.isdir(self._tfds_path): + file_names = os.listdir(abs_tfds_path) + else: + file_names = [os.path.basename(self._tfds_path)] + + for f_name in file_names: + self._logger.info(f"Looking at file {f_name}") + input_file = os.path.join(abs_tfds_path, f_name) + output_file = os.path.join(abs_datasets_path, f_name) + + if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: + self._logger.info("Skipping file") + continue + + with open(input_file, encoding="utf-8") as f: + lines = f.readlines() + + out_lines = [] + is_builder = False + needs_manual_update = False + tfds_imports = [] + for line in lines: + out_line = line + + # Convert imports + if "import tensorflow.compat.v2 as tf" in out_line: + continue + elif "@tfds.core" in out_line: + continue + elif "builder=self" in out_line: + continue + elif "import tensorflow_datasets.public_api as tfds" in out_line: + out_line = "import datasets\n" + elif "import tensorflow" in out_line: + # order is important here + out_line = "" + continue + elif "from absl import logging" in out_line: + out_line = "from datasets import logging\n" + elif "getLogger" in out_line: + out_line = out_line.replace("getLogger", "get_logger") + elif any(expression in out_line for expression in TO_HIGHLIGHT): + needs_manual_update = True + to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT)) + out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n") + out_lines.append(out_line) + out_lines.append(HIGHLIGHT_MESSAGE_POST) + continue + else: + for pattern, replacement in TO_CONVERT: + out_line = re.sub(pattern, replacement, out_line) + + # Take care of saving utilities (to later move them together with main script) + if "tensorflow_datasets" in out_line: + match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line) + tfds_imports.extend(imp.strip() for imp in match.group(1).split(",")) + out_line = "from . import " + match.group(1) + + # Check we have not forget anything + if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: + raise ValueError(f"Error converting {out_line.strip()}") + + if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: + is_builder = True + out_lines.append(out_line) + + if is_builder or "wmt" in f_name: + # We create a new directory for each dataset + dir_name = f_name.replace(".py", "") + output_dir = os.path.join(abs_datasets_path, dir_name) + output_file = os.path.join(output_dir, f_name) + os.makedirs(output_dir, exist_ok=True) + self._logger.info(f"Adding directory {output_dir}") + imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) + else: + # Utilities will be moved at the end + utils_files.append(output_file) + + if needs_manual_update: + with_manual_update.append(output_file) + + with open(output_file, "w", encoding="utf-8") as f: + f.writelines(out_lines) + self._logger.info(f"Converted in {output_file}") + + for utils_file in utils_files: + try: + f_name = os.path.basename(utils_file) + dest_folder = imports_to_builder_map[f_name.replace(".py", "")] + self._logger.info(f"Moving {dest_folder} to {utils_file}") + shutil.copy(utils_file, dest_folder) + except KeyError: + self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.") + + if with_manual_update: + for file_path in with_manual_update: + self._logger.warning( + f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." + ) diff --git a/venv/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py b/venv/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..e13494740918c3fc93b528d4a2d41049d83b72f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py @@ -0,0 +1,156 @@ +import time +from argparse import ArgumentParser +from typing import Optional + +from huggingface_hub import HfApi, create_branch, get_repo_discussions + +from datasets import get_dataset_config_names, get_dataset_default_config_name, load_dataset +from datasets.commands import BaseDatasetsCLICommand + + +def _command_factory(args): + return ConvertToParquetCommand( + args.dataset_id, + args.token, + args.revision, + args.trust_remote_code, + ) + + +class ConvertToParquetCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser): + parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet") + parser.add_argument("dataset_id", help="source dataset ID") + parser.add_argument("--token", help="access token to the Hugging Face Hub") + parser.add_argument("--revision", help="source revision") + parser.add_argument( + "--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script" + ) + parser.set_defaults(func=_command_factory) + + def __init__( + self, + dataset_id: str, + token: Optional[str], + revision: Optional[str], + trust_remote_code: bool, + ): + self._dataset_id = dataset_id + self._token = token + self._revision = revision + self._trust_remote_code = trust_remote_code + + def run(self) -> None: + dataset_id = self._dataset_id + token = self._token + revision = self._revision + trust_remote_code = self._trust_remote_code + print(f"{dataset_id}") + configs = get_dataset_config_names( + dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code + ) + print(f"{configs = }") + default_config = get_dataset_default_config_name( + 
dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code + ) + print(f"{default_config = }") + if default_config: + config = default_config + configs.remove(default_config) + else: + config = configs.pop(0) + print(f"{config = }") + dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code) + commit_info = dataset.push_to_hub( + dataset_id, + config_name=config, + commit_message="Convert dataset to Parquet", + commit_description="Convert dataset to Parquet.", + create_pr=True, + token=token, + set_default=default_config is not None, + ) + time.sleep(5) + if commit_info: + pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url + else: + pr_revision, pr_url = infer_pr(dataset_id, token=token) + for config in configs: + print(f"{config = }") + dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code) + dataset.push_to_hub( + dataset_id, + config_name=config, + commit_message=f"Add {config} data files", + revision=pr_revision, + token=token, + ) + time.sleep(5) + delete_files(dataset_id, revision=pr_revision, token=token) + if not revision: + create_branch(dataset_id, branch="script", repo_type="dataset", token=token, exist_ok=True) + print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}") + + +def infer_pr(dataset_id, token=None): + discussions = get_repo_discussions(dataset_id, repo_type="dataset", token=token) + prs = [discussion for discussion in discussions if discussion.is_pull_request and discussion.status == "open"] + pr = sorted(prs, key=lambda pr: pr.num)[-1] + return pr.git_reference, pr.url + + +def delete_files(dataset_id, revision=None, token=None): + dataset_name = dataset_id.split("/")[-1] + hf_api = HfApi(token=token) + repo_files = hf_api.list_repo_files( + dataset_id, + repo_type="dataset", + ) + if repo_files: + legacy_json_file = [] + python_files = [] + data_files = [] + for filename in repo_files: + if filename in {".gitattributes", "README.md"}: + continue + elif filename == f"{dataset_name}.py": + hf_api.delete_file( + filename, + dataset_id, + repo_type="dataset", + revision=revision, + commit_message="Delete loading script", + ) + elif filename == "dataset_infos.json": + legacy_json_file.append(filename) + elif filename.endswith(".py"): + python_files.append(filename) + else: + data_files.append(filename) + if legacy_json_file: + hf_api.delete_file( + "dataset_infos.json", + dataset_id, + repo_type="dataset", + revision=revision, + commit_message="Delete legacy dataset_infos.json", + ) + if python_files: + for filename in python_files: + hf_api.delete_file( + filename, + dataset_id, + repo_type="dataset", + revision=revision, + commit_message="Delete loading script auxiliary file", + ) + if data_files: + for filename in data_files: + hf_api.delete_file( + filename, + dataset_id, + repo_type="dataset", + revision=revision, + commit_message="Delete data file", + ) diff --git a/venv/lib/python3.10/site-packages/datasets/commands/datasets_cli.py b/venv/lib/python3.10/site-packages/datasets/commands/datasets_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..23afce216fa41d244211f5c2dd249b5c24fe8d05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/datasets_cli.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +from argparse import ArgumentParser + +from datasets.commands.convert import ConvertCommand +from datasets.commands.convert_to_parquet import ConvertToParquetCommand +from 
datasets.commands.dummy_data import DummyDataCommand +from datasets.commands.env import EnvironmentCommand +from datasets.commands.run_beam import RunBeamCommand +from datasets.commands.test import TestCommand +from datasets.utils.logging import set_verbosity_info + + +def parse_unknown_args(unknown_args): + return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])} + + +def main(): + parser = ArgumentParser( + "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False + ) + commands_parser = parser.add_subparsers(help="datasets-cli command helpers") + set_verbosity_info() + + # Register commands + ConvertCommand.register_subcommand(commands_parser) + EnvironmentCommand.register_subcommand(commands_parser) + TestCommand.register_subcommand(commands_parser) + RunBeamCommand.register_subcommand(commands_parser) + DummyDataCommand.register_subcommand(commands_parser) + ConvertToParquetCommand.register_subcommand(commands_parser) + + # Parse args + args, unknown_args = parser.parse_known_args() + if not hasattr(args, "func"): + parser.print_help() + exit(1) + kwargs = parse_unknown_args(unknown_args) + + # Run + service = args.func(args, **kwargs) + service.run() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/datasets/commands/dummy_data.py b/venv/lib/python3.10/site-packages/datasets/commands/dummy_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c4321696e67258d80d40422a327dccb35859545d --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/dummy_data.py @@ -0,0 +1,468 @@ +import fnmatch +import json +import os +import shutil +import tempfile +import xml.etree.ElementTree as ET +from argparse import ArgumentParser +from pathlib import Path +from typing import Optional + +from datasets import config +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_config import DownloadConfig +from datasets.download.download_manager import DownloadManager +from datasets.download.mock_download_manager import MockDownloadManager +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.deprecation_utils import deprecated +from datasets.utils.logging import get_logger, set_verbosity_warning +from datasets.utils.py_utils import map_nested + + +logger = get_logger(__name__) + +DEFAULT_ENCODING = "utf-8" + + +def dummy_data_command_factory(args): + return DummyDataCommand( + args.path_to_dataset, + args.auto_generate, + args.n_lines, + args.json_field, + args.xml_tag, + args.match_text_files, + args.keep_uncompressed, + args.cache_dir, + args.encoding, + ) + + +class DummyDataGeneratorDownloadManager(DownloadManager): + def __init__(self, mock_download_manager, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mock_download_manager = mock_download_manager + self.downloaded_dummy_paths = [] + self.expected_dummy_paths = [] + + def download(self, url_or_urls): + output = super().download(url_or_urls) + dummy_output = self.mock_download_manager.download(url_or_urls) + map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) + map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) + return output + + def download_and_extract(self, url_or_urls): + output = super().extract(super().download(url_or_urls)) + dummy_output = self.mock_download_manager.download(url_or_urls) + map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) +
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) + return output + + def auto_generate_dummy_data_folder( + self, + n_lines: int = 5, + json_field: Optional[str] = None, + xml_tag: Optional[str] = None, + match_text_files: Optional[str] = None, + encoding: Optional[str] = None, + ) -> bool: + os.makedirs( + os.path.join( + self.mock_download_manager.datasets_scripts_dir, + self.mock_download_manager.dataset_name, + self.mock_download_manager.dummy_data_folder, + "dummy_data", + ), + exist_ok=True, + ) + total = 0 + self.mock_download_manager.load_existing_dummy_data = False + for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths): + dst_path = os.path.join( + self.mock_download_manager.datasets_scripts_dir, + self.mock_download_manager.dataset_name, + self.mock_download_manager.dummy_data_folder, + relative_dst_path, + ) + total += self._create_dummy_data( + src_path, + dst_path, + n_lines=n_lines, + json_field=json_field, + xml_tag=xml_tag, + match_text_files=match_text_files, + encoding=encoding, + ) + if total == 0: + logger.error( + "Dummy data generation failed: no dummy files were created. " + "Make sure the data files format is supported by the auto-generation." + ) + return total > 0 + + def _create_dummy_data( + self, + src_path: str, + dst_path: str, + n_lines: int, + json_field: Optional[str] = None, + xml_tag: Optional[str] = None, + match_text_files: Optional[str] = None, + encoding: Optional[str] = None, + ) -> int: + encoding = encoding or DEFAULT_ENCODING + if os.path.isfile(src_path): + logger.debug(f"Trying to generate dummy data file {dst_path}") + dst_path_extensions = Path(dst_path).suffixes + line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"] + is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions) + if match_text_files is not None: + file_name = os.path.basename(dst_path) + for pattern in match_text_files.split(","): + is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern) + # Line by line text file (txt, csv etc.) + if is_line_by_line_text_file: + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(src_path, encoding=encoding) as src_file: + with open(dst_path, "w", encoding=encoding) as dst_file: + first_lines = [] + for i, line in enumerate(src_file): + if i >= n_lines: + break + first_lines.append(line) + dst_file.write("".join(first_lines).strip()) + return 1 + # json file + elif ".json" in dst_path_extensions: + with open(src_path, encoding=encoding) as src_file: + json_data = json.load(src_file) + if json_field is not None: + json_data = json_data[json_field] + if isinstance(json_data, dict): + if not all(isinstance(v, list) for v in json_data.values()): + raise ValueError( + f"Couldn't parse columns {list(json_data.keys())}. " + "Maybe specify which json field must be used " + "to read the data with --json_field ." + ) + first_json_data = {k: v[:n_lines] for k, v in json_data.items()} + else: + first_json_data = json_data[:n_lines] + if json_field is not None: + first_json_data = {json_field: first_json_data} + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(dst_path, "w", encoding=encoding) as dst_file: + json.dump(first_json_data, dst_file) + return 1 + # xml file + elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]): + if xml_tag is None: + logger.warning("Found xml file but 'xml_tag' is set to None. 
Please provide --xml_tag") + else: + self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding) + return 1 + logger.warning( + f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data." + ) + return 0 + # directory, iterate through all files + elif os.path.isdir(src_path): + total = 0 + for path, _, files in os.walk(src_path): + for name in files: + if not name.startswith("."): # ignore files like .DS_Store etc. + src_file_path = os.path.join(path, name) + dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path)) + total += self._create_dummy_data( + src_file_path, + dst_file_path, + n_lines=n_lines, + json_field=json_field, + xml_tag=xml_tag, + match_text_files=match_text_files, + encoding=encoding, + ) + return total + + @staticmethod + def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING): + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(src_path, encoding=encoding) as src_file: + n_line = 0 + parents = [] + for event, elem in ET.iterparse(src_file, events=("start", "end")): + if event == "start": + parents.append(elem) + else: + _ = parents.pop() + if elem.tag == xml_tag: + if n_line < n_lines: + n_line += 1 + else: + if parents: + parents[-1].remove(elem) + ET.ElementTree(element=elem).write(dst_path, encoding=encoding) + + def compress_autogenerated_dummy_data(self, path_to_dataset): + root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder) + base_name = os.path.join(root_dir, "dummy_data") + base_dir = "dummy_data" + logger.info(f"Compressing dummy data folder to '{base_name}.zip'") + shutil.make_archive(base_name, "zip", root_dir, base_dir) + shutil.rmtree(base_name) + + +@deprecated( + "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI." +) +class DummyDataCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + test_parser = parser.add_parser("dummy_data", help="Generate dummy data.") + test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data") + test_parser.add_argument( + "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data" + ) + test_parser.add_argument( + "--json_field", + type=str, + default=None, + help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)", + ) + test_parser.add_argument( + "--xml_tag", + type=str, + default=None, + help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.", + ) + test_parser.add_argument( + "--match_text_files", + type=str, + default=None, + help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label", + ) + test_parser.add_argument( + "--keep_uncompressed", + action="store_true", + help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. 
Useful for debugging for to do manual adjustements before compressing.", + ) + test_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory to download and cache files when auto-generating dummy data", + ) + test_parser.add_argument( + "--encoding", + type=str, + default=None, + help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}", + ) + test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)") + test_parser.set_defaults(func=dummy_data_command_factory) + + def __init__( + self, + path_to_dataset: str, + auto_generate: bool, + n_lines: int, + json_field: Optional[str], + xml_tag: Optional[str], + match_text_files: Optional[str], + keep_uncompressed: bool, + cache_dir: Optional[str], + encoding: Optional[str], + ): + self._path_to_dataset = path_to_dataset + if os.path.isdir(path_to_dataset): + self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1] + else: + self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2] + cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE) + self._auto_generate = auto_generate + self._n_lines = n_lines + self._json_field = json_field + self._xml_tag = xml_tag + self._match_text_files = match_text_files + self._keep_uncompressed = keep_uncompressed + self._cache_dir = cache_dir + self._encoding = encoding + + def run(self): + set_verbosity_warning() + dataset_module = dataset_module_factory(self._path_to_dataset) + builder_cls = import_main_class(dataset_module.module_path) + + # use `None` as config if no configs + builder_configs = builder_cls.BUILDER_CONFIGS or [None] + auto_generate_results = [] + with tempfile.TemporaryDirectory() as tmp_dir: + for builder_config in builder_configs: + config_name = builder_config.name if builder_config else None + dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir) + version = builder_config.version if builder_config else dataset_builder.config.version + mock_dl_manager = MockDownloadManager( + dataset_name=self._dataset_name, + config=builder_config, + version=version, + use_local_dummy_data=True, + load_existing_dummy_data=False, + ) + + if self._auto_generate: + auto_generate_results.append( + self._autogenerate_dummy_data( + dataset_builder=dataset_builder, + mock_dl_manager=mock_dl_manager, + keep_uncompressed=self._keep_uncompressed, + ) + ) + else: + self._print_dummy_data_instructions( + dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager + ) + if self._auto_generate and not self._keep_uncompressed: + if all(auto_generate_results): + print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'") + else: + print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'") + + def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]: + dl_cache_dir = ( + os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR) + if self._cache_dir + else config.DOWNLOADED_DATASETS_PATH + ) + download_config = DownloadConfig(cache_dir=dl_cache_dir) + dl_manager = DummyDataGeneratorDownloadManager( + dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config + ) + dataset_builder._split_generators(dl_manager) + mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data + dl_manager.auto_generate_dummy_data_folder( + 
n_lines=self._n_lines, + json_field=self._json_field, + xml_tag=self._xml_tag, + match_text_files=self._match_text_files, + encoding=self._encoding, + ) + if not keep_uncompressed: + path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name) + dl_manager.compress_autogenerated_dummy_data(path_do_dataset) + # now test that the dummy_data.zip file actually works + mock_dl_manager.load_existing_dummy_data = True # use real dummy data + n_examples_per_split = {} + os.makedirs(dataset_builder._cache_dir, exist_ok=True) + try: + split_generators = dataset_builder._split_generators(mock_dl_manager) + for split_generator in split_generators: + dataset_builder._prepare_split(split_generator, check_duplicate_keys=False) + n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples + except OSError as e: + logger.error( + f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n" + + str(e) + ) + return False + else: + if all(n_examples > 0 for n_examples in n_examples_per_split.values()): + logger.warning( + f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''." + ) + return True + else: + empty_splits = [ + split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0 + ] + logger.warning( + f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''." + ) + return False + else: + generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) + logger.info( + f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. " + "Please compress this directory into a zip file to use it for dummy data tests." + ) + + def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager): + dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) + logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ") + os.makedirs(dummy_data_folder, exist_ok=True) + + try: + generator_splits = dataset_builder._split_generators(mock_dl_manager) + except FileNotFoundError as e: + print( + f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}." + ) + + files_to_create = set() + split_names = [] + dummy_file_name = mock_dl_manager.dummy_file_name + + for split in generator_splits: + logger.info(f"Collecting dummy data file paths to create for {split.name}") + split_names.append(split.name) + gen_kwargs = split.gen_kwargs + generator = dataset_builder._generate_examples(**gen_kwargs) + + try: + dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n" + config_string = ( + f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else "" + ) + dummy_data_guidance_print += ( + "- In order to create the dummy data for " + + config_string + + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . 
\n\n" + ) + + # trigger generate function + for key, record in generator: + pass + + dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n" + + except FileNotFoundError as e: + files_to_create.add(e.filename) + + split_names = ", ".join(split_names) + if len(files_to_create) > 0: + # no glob.glob(...) in `_generate_examples(...)` + if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: + dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n" + files_string = dummy_file_name + else: + files_string = ", ".join(files_to_create) + dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n" + + dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n" + + dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n" + + if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: + dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n" + + dummy_data_guidance_print += ( + f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n" + ) + + dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" + else: + dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n" + + dummy_data_guidance_print += ( + f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n" + ) + + dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" + + dummy_data_guidance_print += ( + f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n" + ) + + dummy_data_guidance_print += 83 * "=" + "\n" + + print(dummy_data_guidance_print) diff --git a/venv/lib/python3.10/site-packages/datasets/commands/env.py b/venv/lib/python3.10/site-packages/datasets/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..40b2a3654c8f1c6c080222a80b12d108972a5dc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/env.py @@ -0,0 +1,41 @@ +import platform +from argparse import ArgumentParser + +import fsspec +import huggingface_hub +import pandas +import pyarrow + +from datasets import __version__ as version +from datasets.commands import 
BaseDatasetsCLICommand + + +def info_command_factory(_): + return EnvironmentCommand() + + +class EnvironmentCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + download_parser = parser.add_parser("env", help="Print relevant system environment info.") + download_parser.set_defaults(func=info_command_factory) + + def run(self): + info = { + "`datasets` version": version, + "Platform": platform.platform(), + "Python version": platform.python_version(), + "`huggingface_hub` version": huggingface_hub.__version__, + "PyArrow version": pyarrow.__version__, + "Pandas version": pandas.__version__, + "`fsspec` version": fsspec.__version__, + } + + print("\nCopy-and-paste the text below in your GitHub issue.\n") + print(self.format_dict(info)) + + return info + + @staticmethod + def format_dict(d): + return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" diff --git a/venv/lib/python3.10/site-packages/datasets/commands/run_beam.py b/venv/lib/python3.10/site-packages/datasets/commands/run_beam.py new file mode 100644 index 0000000000000000000000000000000000000000..1d59e464344076315a2f51c608e04502189a3c2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/run_beam.py @@ -0,0 +1,168 @@ +import os +from argparse import ArgumentParser +from pathlib import Path +from shutil import copyfile +from typing import List + +from datasets import config +from datasets.builder import DatasetBuilder +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_config import DownloadConfig +from datasets.download.download_manager import DownloadMode +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.deprecation_utils import deprecated +from datasets.utils.info_utils import VerificationMode + + +def run_beam_command_factory(args, **kwargs): + return RunBeamCommand( + args.dataset, + args.name, + args.cache_dir, + args.beam_pipeline_options, + args.data_dir, + args.all_configs, + args.save_info or args.save_infos, + args.ignore_verifications, + args.force_redownload, + **kwargs, + ) + + +@deprecated( + "`BeamBasedBuilder` and `datasets-cli run_beam` are deprecated and will be removed in v3.0.0. Please use `GeneratorBasedBuilder` or `ArrowBasedBuilder` instead." +) +class RunBeamCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline") + run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download") + run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name") + run_beam_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory where the datasets are stored", + ) + run_beam_parser.add_argument( + "--beam_pipeline_options", + type=str, + default="", + help="Beam pipeline options, separated by commas. 
Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`", + ) + run_beam_parser.add_argument( + "--data_dir", + type=str, + default=None, + help="Can be used to specify a manual directory to get the files from", + ) + run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") + run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file") + run_beam_parser.add_argument( + "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks" + ) + run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") + # aliases + run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info") + run_beam_parser.set_defaults(func=run_beam_command_factory) + + def __init__( + self, + dataset: str, + name: str, + cache_dir: str, + beam_pipeline_options: str, + data_dir: str, + all_configs: bool, + save_infos: bool, + ignore_verifications: bool, + force_redownload: bool, + **config_kwargs, + ): + self._dataset = dataset + self._name = name + self._cache_dir = cache_dir + self._beam_pipeline_options = beam_pipeline_options + self._data_dir = data_dir + self._all_configs = all_configs + self._save_infos = save_infos + self._ignore_verifications = ignore_verifications + self._force_redownload = force_redownload + self._config_kwargs = config_kwargs + + def run(self): + import apache_beam as beam + + if self._name is not None and self._all_configs: + print("Both parameters `name` and `all_configs` can't be used at once.") + exit(1) + path, config_name = self._dataset, self._name + dataset_module = dataset_module_factory(path) + builder_cls = import_main_class(dataset_module.module_path) + builders: List[DatasetBuilder] = [] + if self._beam_pipeline_options: + beam_options = beam.options.pipeline_options.PipelineOptions( + flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt] + ) + else: + beam_options = None + if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0: + for builder_config in builder_cls.BUILDER_CONFIGS: + builders.append( + builder_cls( + config_name=builder_config.name, + data_dir=self._data_dir, + hash=dataset_module.hash, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + ) + ) + else: + builders.append( + builder_cls( + config_name=config_name, + data_dir=self._data_dir, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + **self._config_kwargs, + ) + ) + + for builder in builders: + builder.download_and_prepare( + download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS + if not self._force_redownload + else DownloadMode.FORCE_REDOWNLOAD, + download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH), + verification_mode=VerificationMode.NO_CHECKS + if self._ignore_verifications + else VerificationMode.ALL_CHECKS, + ) + if self._save_infos: + builder._save_infos() + + print("Apache beam run successful.") + + # If save_infos=True, the dataset infos file is created next to the loaded module file. + # Let's move it to the original directory of the dataset script, to allow the user to + # upload them on S3 at the same time afterwards. 
+ if self._save_infos: + dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) + + name = Path(path).name + ".py" + + combined_path = os.path.join(path, name) + if os.path.isfile(path): + dataset_dir = os.path.dirname(path) + elif os.path.isfile(combined_path): + dataset_dir = path + else: # in case of a remote dataset + print(f"Dataset Infos file saved at {dataset_infos_path}") + exit(1) + + # Move datasetinfo back to the user + user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME) + copyfile(dataset_infos_path, user_dataset_infos_path) + print(f"Dataset Infos file saved at {user_dataset_infos_path}") diff --git a/venv/lib/python3.10/site-packages/datasets/commands/test.py b/venv/lib/python3.10/site-packages/datasets/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..da82427e935e270d5b2b5c1958443305c2e90405 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/commands/test.py @@ -0,0 +1,201 @@ +import logging +import os +from argparse import ArgumentParser +from pathlib import Path +from shutil import copyfile, rmtree +from typing import Generator + +import datasets.config +from datasets.builder import DatasetBuilder +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_manager import DownloadMode +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.info_utils import VerificationMode +from datasets.utils.logging import ERROR, get_logger + + +logger = get_logger(__name__) + + +def _test_command_factory(args): + return TestCommand( + args.dataset, + args.name, + args.cache_dir, + args.data_dir, + args.all_configs, + args.save_info or args.save_infos, + args.ignore_verifications, + args.force_redownload, + args.clear_cache, + args.num_proc, + ) + + +class TestCommand(BaseDatasetsCLICommand): + __test__ = False # to tell pytest it's not a test class + + @staticmethod + def register_subcommand(parser: ArgumentParser): + test_parser = parser.add_parser("test", help="Test dataset implementation.") + test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") + test_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory where the datasets are stored.", + ) + test_parser.add_argument( + "--data_dir", + type=str, + default=None, + help="Can be used to specify a manual directory to get the files from.", + ) + test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") + test_parser.add_argument( + "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" + ) + test_parser.add_argument( + "--ignore_verifications", + action="store_true", + help="Run the test without checksums and splits checks.", + ) + test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") + test_parser.add_argument( + "--clear_cache", + action="store_true", + help="Remove downloaded files and cached datasets after each config test", + ) + test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes") + # aliases + test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") + test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") + test_parser.set_defaults(func=_test_command_factory) + + def __init__( + self, + dataset: str, + name: str, + cache_dir: str, + 
data_dir: str, + all_configs: bool, + save_infos: bool, + ignore_verifications: bool, + force_redownload: bool, + clear_cache: bool, + num_proc: int, + ): + self._dataset = dataset + self._name = name + self._cache_dir = cache_dir + self._data_dir = data_dir + self._all_configs = all_configs + self._save_infos = save_infos + self._ignore_verifications = ignore_verifications + self._force_redownload = force_redownload + self._clear_cache = clear_cache + self._num_proc = num_proc + if clear_cache and not cache_dir: + print( + "When --clear_cache is used, specifying a cache directory is mandatory.\n" + "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" + "Please provide a --cache_dir that will be used to test the dataset script." + ) + exit(1) + if save_infos: + self._ignore_verifications = True + + def run(self): + logging.getLogger("filelock").setLevel(ERROR) + if self._name is not None and self._all_configs: + print("Both parameters `config` and `all_configs` can't be used at once.") + exit(1) + path, config_name = self._dataset, self._name + module = dataset_module_factory(path) + builder_cls = import_main_class(module.module_path) + n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 + + def get_builders() -> Generator[DatasetBuilder, None, None]: + if self._all_configs and builder_cls.BUILDER_CONFIGS: + for i, config in enumerate(builder_cls.BUILDER_CONFIGS): + if "config_name" in module.builder_kwargs: + yield builder_cls( + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + else: + yield builder_cls( + config_name=config.name, + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + else: + if "config_name" in module.builder_kwargs: + yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) + else: + yield builder_cls( + config_name=config_name, + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + + for j, builder in enumerate(get_builders()): + print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") + builder._record_infos = os.path.exists( + os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) + ) # record checksums only if we need to update a (deprecated) dataset_infos.json + builder.download_and_prepare( + download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS + if not self._force_redownload + else DownloadMode.FORCE_REDOWNLOAD, + verification_mode=VerificationMode.NO_CHECKS + if self._ignore_verifications + else VerificationMode.ALL_CHECKS, + try_from_hf_gcs=False, + num_proc=self._num_proc, + ) + builder.as_dataset() + if self._save_infos: + builder._save_infos() + + # If save_infos=True, the dataset card (README.md) is created next to the loaded module file. + # The dataset_infos are saved in the YAML part of the README.md + + # Let's move it to the original directory of the dataset script, to allow the user to + # upload them on S3 at the same time afterwards. 
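For illustration only (not part of the file above): the builder loop in `TestCommand.run()` is roughly what the following hand-written sketch does with the public API. `"path/to/my_dataset"` is a placeholder, and the sketch assumes the `datasets` package from this diff is importable.

    from datasets import load_dataset_builder
    from datasets.download.download_manager import DownloadMode
    from datasets.utils.info_utils import VerificationMode

    # resolve the script/config and build it, as the command does for each configuration
    builder = load_dataset_builder("path/to/my_dataset")
    builder.download_and_prepare(
        download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS,
        verification_mode=VerificationMode.ALL_CHECKS,
    )
    ds = builder.as_dataset()  # materialize the prepared splits before the infos are saved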
+ if self._save_infos: + dataset_readme_path = os.path.join( + builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME + ) + name = Path(path).name + ".py" + combined_path = os.path.join(path, name) + if os.path.isfile(path): + dataset_dir = os.path.dirname(path) + elif os.path.isfile(combined_path): + dataset_dir = path + elif os.path.isdir(path): # for local directories containing only data files + dataset_dir = path + else: # in case of a remote dataset + dataset_dir = None + print(f"Dataset card saved at {dataset_readme_path}") + + # Move dataset_info back to the user + if dataset_dir is not None: + user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME) + copyfile(dataset_readme_path, user_dataset_readme_path) + print(f"Dataset card saved at {user_dataset_readme_path}") + + # If clear_cache=True, the download folder and the dataset builder cache directory are deleted + if self._clear_cache: + if os.path.isdir(builder._cache_dir): + logger.warning(f"Clearing cache at {builder._cache_dir}") + rmtree(builder._cache_dir) + download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) + if os.path.isdir(download_dir): + logger.warning(f"Clearing cache at {download_dir}") + rmtree(download_dir) + + print("Test successful.") diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__init__.py b/venv/lib/python3.10/site-packages/datasets/formatting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..78f64cfe9126bafebd549265a176a159abe72eeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/__init__.py @@ -0,0 +1,139 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ruff: noqa + +from typing import Dict, List, Optional, Type + +from .. import config +from ..utils import logging +from .formatting import ( + ArrowFormatter, + CustomFormatter, + Formatter, + PandasFormatter, + PythonFormatter, + TensorFormatter, + format_table, + query_table, +) +from .np_formatter import NumpyFormatter + + +logger = logging.get_logger(__name__) + +_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} +_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} +_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} + + +def _register_formatter( + formatter_cls: type, + format_type: Optional[str], + aliases: Optional[List[str]] = None, +): + """ + Register a Formatter object using a name and optional aliases. + This function must be used on a Formatter class. 
+ """ + aliases = aliases if aliases is not None else [] + if format_type in _FORMAT_TYPES: + logger.warning( + f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" + ) + _FORMAT_TYPES[format_type] = formatter_cls + for alias in set(aliases + [format_type]): + if alias in _FORMAT_TYPES_ALIASES: + logger.warning( + f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" + ) + _FORMAT_TYPES_ALIASES[alias] = format_type + + +def _register_unavailable_formatter( + unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None +): + """ + Register an unavailable Formatter object using a name and optional aliases. + This function must be used on an Exception object that is raised when trying to get the unavailable formatter. + """ + aliases = aliases if aliases is not None else [] + for alias in set(aliases + [format_type]): + _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error + + +# Here we define all the available formatting functions that can be used by `Dataset.set_format` +_register_formatter(PythonFormatter, None, aliases=["python"]) +_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) +_register_formatter(NumpyFormatter, "numpy", aliases=["np"]) +_register_formatter(PandasFormatter, "pandas", aliases=["pd"]) +_register_formatter(CustomFormatter, "custom") + +if config.POLARS_AVAILABLE: + from .polars_formatter import PolarsFormatter + + _register_formatter(PolarsFormatter, "polars", aliases=["pl"]) +else: + _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.") + _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"]) + +if config.TORCH_AVAILABLE: + from .torch_formatter import TorchFormatter + + _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) +else: + _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") + _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) + +if config.TF_AVAILABLE: + from .tf_formatter import TFFormatter + + _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) +else: + _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") + _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) + +if config.JAX_AVAILABLE: + from .jax_formatter import JaxFormatter + + _register_formatter(JaxFormatter, "jax", aliases=[]) +else: + _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") + _register_unavailable_formatter(_jax_error, "jax", aliases=[]) + + +def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: + """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" + if format_type in _FORMAT_TYPES_ALIASES: + return _FORMAT_TYPES_ALIASES[format_type] + else: + return format_type + + +def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: + """ + Factory function to get a Formatter given its type name and keyword arguments. + A formatter is an object that extracts and formats data from pyarrow table. + It defines the formatting for rows, colums and batches. + If the formatter for a given type name doesn't exist or is not available, an error is raised. 
+ """ + format_type = get_format_type_from_alias(format_type) + if format_type in _FORMAT_TYPES: + return _FORMAT_TYPES[format_type](**format_kwargs) + if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: + raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] + else: + raise ValueError( + f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" + ) diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de8a18e744e52ddae939d2b84d3a78f0956c75a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55c8a24084e507ac6a43558a7f5478226955ef8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd63970ccc85dc0d8ca37195dd6bf925e4bd270d Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8062dea0d1a2687d1ae2903314a42984f6ebf1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bae8ac1fdd5e7a15718b36e56a970114247d2bd4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea19308d5fd35350e5f006edd567f3643aea149 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49355486d79b4ea56e7be7fa3516cfea8e509342 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/formatting.py b/venv/lib/python3.10/site-packages/datasets/formatting/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..9570fb1808ca2a46b35da691c033802cd6eb601b --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/formatting.py @@ -0,0 +1,653 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import operator +from collections.abc import Mapping, MutableMapping +from functools import partial + +# Lint as: python3 +from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + +import numpy as np +import pandas as pd +import pyarrow as pa +from packaging import version + +from .. import config +from ..features import Features +from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper +from ..table import Table +from ..utils.py_utils import no_op_if_value_is_null + + +T = TypeVar("T") + +RowFormat = TypeVar("RowFormat") +ColumnFormat = TypeVar("ColumnFormat") +BatchFormat = TypeVar("BatchFormat") + + +def _is_range_contiguous(key: range) -> bool: + return key.step == 1 and key.stop >= key.start + + +def _raise_bad_key_type(key: Any): + raise TypeError( + f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable." + ) + + +def _query_table_with_indices_mapping( + table: Table, key: Union[int, slice, range, str, Iterable], indices: Table +) -> pa.Table: + """ + Query a pyarrow Table to extract the subtable that correspond to the given key. + The :obj:`indices` parameter corresponds to the indices mapping in case we cant to take into + account a shuffling or an indices selection for example. + The indices table must contain one column named "indices" of type uint64. + """ + if isinstance(key, int): + key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py() + return _query_table(table, key) + if isinstance(key, slice): + key = range(*key.indices(indices.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return _query_table( + table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)] + ) + else: + pass # treat as an iterable + if isinstance(key, str): + table = table.select([key]) + return _query_table(table, indices.column(0).to_pylist()) + if isinstance(key, Iterable): + return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key]) + + _raise_bad_key_type(key) + + +def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table: + """ + Query a pyarrow Table to extract the subtable that correspond to the given key. 
+ """ + if isinstance(key, int): + return table.fast_slice(key % table.num_rows, 1) + if isinstance(key, slice): + key = range(*key.indices(table.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return table.fast_slice(key.start, key.stop - key.start) + else: + pass # treat as an iterable + if isinstance(key, str): + return table.table.drop([column for column in table.column_names if column != key]) + if isinstance(key, Iterable): + key = np.fromiter(key, np.int64) + if len(key) == 0: + return table.table.slice(0, 0) + # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773) + return table.fast_gather(key % table.num_rows) + + _raise_bad_key_type(key) + + +def _is_array_with_nulls(pa_array: pa.Array) -> bool: + return pa_array.null_count > 0 + + +class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + Arrow extractor are used to extract data from pyarrow tables. + It makes it possible to extract rows, columns and batches. + These three extractions types have to be implemented. + """ + + def extract_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def extract_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def extract_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: + """Return the first element of a batch (dict) as a row (dict)""" + return {key: array[0] for key, array in py_dict.items()} + + +class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): + def extract_row(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + def extract_column(self, pa_table: pa.Table) -> pa.Array: + return pa_table.column(0) + + def extract_batch(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + +class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(pa_table.to_pydict()) + + def extract_column(self, pa_table: pa.Table) -> list: + return pa_table.column(0).to_pylist() + + def extract_batch(self, pa_table: pa.Table) -> dict: + return pa_table.to_pydict() + + +class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): + def __init__(self, **np_array_kwargs): + self.np_array_kwargs = np_array_kwargs + + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(self.extract_batch(pa_table)) + + def extract_column(self, pa_table: pa.Table) -> np.ndarray: + return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) + + def extract_batch(self, pa_table: pa.Table) -> dict: + return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} + + def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: + if isinstance(pa_array, pa.ChunkedArray): + if isinstance(pa_array.type, _ArrayXDExtensionType): + # don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and all( + not _is_array_with_nulls(chunk) for chunk in pa_array.chunks + ) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + if isinstance(pa_array.type, _ArrayXDExtensionType): + # 
don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only) + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() + if len(array) > 0: + if any( + (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape)) + or (isinstance(x, float) and np.isnan(x)) + for x in array + ): + return np.array(array, copy=False, dtype=object) + return np.array(array, copy=False) + + +class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): + def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) + + def extract_column(self, pa_table: pa.Table) -> pd.Series: + return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] + + def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.to_pandas(types_mapper=pandas_types_mapper) + + +class PythonFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: dict) -> dict: + return self.features.decode_example(row) if self.features else row + + def decode_column(self, column: list, column_name: str) -> list: + return self.features.decode_column(column, column_name) if self.features else column + + def decode_batch(self, batch: dict) -> dict: + return self.features.decode_batch(batch) if self.features else batch + + +class PandasFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: + decode = ( + { + column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) + for column_name, feature in self.features.items() + if self.features._column_requires_decoding[column_name] + } + if self.features + else {} + ) + if decode: + row[list(decode.keys())] = row.transform(decode) + return row + + def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: + decode = ( + no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) + if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] + else None + ) + if decode: + column = column.transform(decode) + return column + + def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: + return self.decode_row(batch) + + +class LazyDict(MutableMapping): + """A dictionary backed by Arrow data. 
The values are formatted on-the-fly when accessing the dictionary.""" + + def __init__(self, pa_table: pa.Table, formatter: "Formatter"): + self.pa_table = pa_table + self.formatter = formatter + + self.data = {key: None for key in pa_table.column_names} + self.keys_to_format = set(self.data.keys()) + + def __len__(self): + return len(self.data) + + def __getitem__(self, key): + value = self.data[key] + if key in self.keys_to_format: + value = self.format(key) + self.data[key] = value + self.keys_to_format.remove(key) + return value + + def __setitem__(self, key, value): + if key in self.keys_to_format: + self.keys_to_format.remove(key) + self.data[key] = value + + def __delitem__(self, key) -> None: + if key in self.keys_to_format: + self.keys_to_format.remove(key) + del self.data[key] + + def __iter__(self): + return iter(self.data) + + def __contains__(self, key): + return key in self.data + + def __repr__(self): + self._format_all() + return repr(self.data) + + if config.PY_VERSION >= version.parse("3.9"): + # merging with the union ("|") operator is supported in Python 3.9+ + + def __or__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = inst.data | other.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = inst.data | other + return inst + return NotImplemented + + def __ror__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = other.data | inst.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = other | inst.data + return inst + return NotImplemented + + def __ior__(self, other): + if isinstance(other, LazyDict): + other = other.copy() + other._format_all() + self.keys_to_format -= other.data.keys() + self.data |= other.data + else: + self.keys_to_format -= other.keys() + self.data |= other + return self + + def __copy__(self): + # Identical to `UserDict.__copy__` + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"].copy() + inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy() + return inst + + def copy(self): + import copy + + return copy.copy(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + raise NotImplementedError + + def format(self, key): + raise NotImplementedError + + def _format_all(self): + for key in self.keys_to_format: + self.data[key] = self.format(key) + self.keys_to_format.clear() + + +class LazyRow(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key]))[0] + + +class LazyBatch(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key])) + + +class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + A formatter is an object that extracts and formats data from pyarrow tables. + It defines the formatting for rows, columns and batches. 
+ """ + + simple_arrow_extractor = SimpleArrowExtractor + python_arrow_extractor = PythonArrowExtractor + numpy_arrow_extractor = NumpyArrowExtractor + pandas_arrow_extractor = PandasArrowExtractor + + def __init__(self, features: Optional[Features] = None): + self.features = features + self.python_features_decoder = PythonFeaturesDecoder(self.features) + self.pandas_features_decoder = PandasFeaturesDecoder(self.features) + + def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: + if query_type == "row": + return self.format_row(pa_table) + elif query_type == "column": + return self.format_column(pa_table) + elif query_type == "batch": + return self.format_batch(pa_table) + + def format_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def format_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): + def recursive_tensorize(self, data_struct: dict): + raise NotImplementedError + + +class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): + def format_row(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_row(pa_table) + + def format_column(self, pa_table: pa.Table) -> pa.Array: + return self.simple_arrow_extractor().extract_column(pa_table) + + def format_batch(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_batch(pa_table) + + +class PythonFormatter(Formatter[Mapping, list, Mapping]): + def __init__(self, features=None, lazy=False): + super().__init__(features) + self.lazy = lazy + + def format_row(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyRow(pa_table, self) + row = self.python_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> list: + column = self.python_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyBatch(pa_table, self) + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return batch + + +class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): + def format_row(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_row(pa_table) + row = self.pandas_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> pd.Series: + column = self.pandas_arrow_extractor().extract_column(pa_table) + column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_batch(pa_table) + row = self.pandas_features_decoder.decode_batch(row) + return row + + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + """ + A user-defined custom formatter function defined by a ``transform``. + The transform must take as input a batch of data extracted for an arrow table using the python extractor, + and return a batch. + If the output batch is not a dict, then output_all_columns won't work. 
+ If the ouput batch has several fields, then querying a single column won't work since we don't know which field + to return. + """ + + def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): + super().__init__(features=features) + self.transform = transform + + def format_row(self, pa_table: pa.Table) -> dict: + formatted_batch = self.format_batch(pa_table) + try: + return _unnest(formatted_batch) + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + formatted_batch = self.format_batch(pa_table) + if hasattr(formatted_batch, "keys"): + if len(formatted_batch.keys()) > 1: + raise TypeError( + "Tried to query a column but the custom formatting function returns too many columns. " + f"Only one column was expected but got columns {list(formatted_batch.keys())}." + ) + else: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) + try: + return formatted_batch[pa_table.column_names[0]] + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_batch(self, pa_table: pa.Table) -> dict: + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return self.transform(batch) + + +def _check_valid_column_key(key: str, columns: List[str]) -> None: + if key not in columns: + raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") + + +def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: + if isinstance(key, int): + if (key < 0 and key + size < 0) or (key >= size): + raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") + return + elif isinstance(key, slice): + pass + elif isinstance(key, range): + if len(key) > 0: + _check_valid_index_key(max(key), size=size) + _check_valid_index_key(min(key), size=size) + elif isinstance(key, Iterable): + if len(key) > 0: + _check_valid_index_key(int(max(key)), size=size) + _check_valid_index_key(int(min(key)), size=size) + else: + _raise_bad_key_type(key) + + +def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, int): + return "row" + elif isinstance(key, str): + return "column" + elif isinstance(key, (slice, range, Iterable)): + return "batch" + _raise_bad_key_type(key) + + +def query_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + indices: Optional[Table] = None, +) -> pa.Table: + """ + Query a Table to extract the subtable that correspond to the given key. + + Args: + table (``datasets.table.Table``): The input Table to query from + key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: + - an integer i: the subtable containing only the i-th row + - a slice [i:j:k]: the subtable containing the rows that correspond to this slice + - a range(i, j, k): the subtable containing the rows that correspond to this range + - a string c: the subtable containing all the rows but only the column c + - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable + indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. 
+ The indices table must contain one column named "indices" of type uint64. + This is used in case of shuffling or rows selection. + + + Returns: + ``pyarrow.Table``: the result of the query on the input table + """ + # Check if key is valid + if not isinstance(key, (int, slice, range, str, Iterable)): + try: + key = operator.index(key) + except TypeError: + _raise_bad_key_type(key) + if isinstance(key, str): + _check_valid_column_key(key, table.column_names) + else: + size = indices.num_rows if indices is not None else table.num_rows + _check_valid_index_key(key, size) + # Query the main table + if indices is None: + pa_subtable = _query_table(table, key) + else: + pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) + return pa_subtable + + +def format_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + formatter: Formatter, + format_columns: Optional[list] = None, + output_all_columns=False, +): + """ + Format a Table depending on the key that was used and a Formatter object. + + Args: + table (``datasets.table.Table``): The input Table to format + key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats + the table as either a row, a column or a batch. + formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as + PythonFormatter, NumpyFormatter, etc. + format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the + given formatter. Other columns are discarded (unless ``output_all_columns`` is True) + output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns + that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. + + + Returns: + A row, column or batch formatted object defined by the Formatter: + - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. + - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. + - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. + - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. + - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. 
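For illustration only (not part of the file above): `query_table` and `format_table` are meant to compose, with the queried subtable handed to the formatter. A minimal sketch, assuming `datasets` is importable; the column name "a" is just a placeholder.

    from datasets.formatting import PythonFormatter, format_table, query_table
    from datasets.table import InMemoryTable

    table = InMemoryTable.from_pydict({"a": [1, 2, 3]})
    pa_subtable = query_table(table, key=1)                                 # pyarrow.Table holding only the second row
    row = format_table(pa_subtable, key=1, formatter=PythonFormatter())     # {'a': 2}
    pa_subcolumn = query_table(table, key="a")
    col = format_table(pa_subcolumn, key="a", formatter=PythonFormatter())  # [1, 2, 3]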
+ """ + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=formatter.features) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == "column": + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop( + col for col in pa_table.column_names if col in format_columns + ) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError( + f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" + ) + return formatted_output diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py b/venv/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8035341c5cd2794345163b388945b3a092708916 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py @@ -0,0 +1,160 @@ +# Copyright 2021 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING, Dict, Optional + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import jax + import jaxlib + +logger = get_logger() + +DEVICE_MAPPING: Optional[dict] = None + + +class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): + def __init__(self, features=None, device=None, **jnp_array_kwargs): + super().__init__(features=features) + import jax + from jaxlib.xla_client import Device + + if isinstance(device, Device): + raise ValueError( + f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " + "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " + "the device with `str()` to get its string identifier that will be internally mapped " + "to the actual `jaxlib.xla_extension.Device`." 
+ ) + self.device = device if isinstance(device, str) else str(jax.devices()[0]) + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + if self.device not in list(DEVICE_MAPPING.keys()): + logger.warning( + f"Device with string identifier {self.device} not listed among the available " + f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " + f"device: {str(jax.devices()[0])}." + ) + self.device = str(jax.devices()[0]) + self.jnp_array_kwargs = jnp_array_kwargs + + @staticmethod + def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]: + import jax + + return {str(device): device for device in jax.devices()} + + def _consolidate(self, column): + import jax + import jax.numpy as jnp + + if isinstance(column, list) and column: + if all( + isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return jnp.stack(column, axis=0) + return column + + def _tensorize(self, value): + import jax + import jax.numpy as jnp + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + # the default int precision depends on the jax config + # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision + if jax.config.jax_enable_x64: + default_dtype = {"dtype": jnp.int64} + else: + default_dtype = {"dtype": jnp.int32} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": jnp.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + + with jax.default_device(DEVICE_MAPPING[self.device]): + # calling jnp.array on a np.ndarray does copy the data + # see https://github.com/google/jax/issues/4486 + return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + import jax + + # support for torch, tf, jax etc. 
+ if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "jax.Array": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/np_formatter.py b/venv/lib/python3.10/site-packages/datasets/formatting/np_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..95bcff2b51728fdd9647dad382639724df163ce2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/np_formatter.py @@ -0,0 +1,106 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + + def _consolidate(self, column): + if isinstance(column, list): + if column and all( + isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return np.stack(column) + else: + # don't use np.array(column, dtype=object) + # since it fails in certain cases + # see https://stackoverflow.com/q/51005699 + out = np.empty(len(column), dtype=object) + out[:] = column + return out + return column + + def _tensorize(self, value): + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value + elif isinstance(value, np.number): + return value + + default_dtype = {} + + if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": np.int64} + elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": np.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + return np.asarray(value, **self.np_array_kwargs) + + return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + if isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> np.ndarray: + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py b/venv/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py new file mode 100644 index 
0000000000000000000000000000000000000000..543bde52dd0fb29f2732bce5ee2edcdf9f253109 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py @@ -0,0 +1,122 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping +from functools import partial +from typing import TYPE_CHECKING, Optional + +import pyarrow as pa + +from .. import config +from ..features import Features +from ..features.features import decode_nested_example +from ..utils.py_utils import no_op_if_value_is_null +from .formatting import BaseArrowExtractor, TensorFormatter + + +if TYPE_CHECKING: + import polars as pl + + +class PolarsArrowExtractor(BaseArrowExtractor["pl.DataFrame", "pl.Series", "pl.DataFrame"]): + def extract_row(self, pa_table: pa.Table) -> "pl.DataFrame": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table.slice(length=1)) + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + def extract_column(self, pa_table: pa.Table) -> "pl.Series": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]] + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + def extract_batch(self, pa_table: pa.Table) -> "pl.DataFrame": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table) + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + +class PolarsFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + import polars as pl # noqa: F401 - import pl at initialization + + def decode_row(self, row: "pl.DataFrame") -> "pl.DataFrame": + decode = ( + { + column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) + for column_name, feature in self.features.items() + if self.features._column_requires_decoding[column_name] + } + if self.features + else {} + ) + if decode: + row[list(decode.keys())] = row.map_rows(decode) + return row + + def decode_column(self, column: "pl.Series", column_name: str) -> "pl.Series": + decode = ( + no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) + if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] + else None + ) + if decode: + column = column.map_elements(decode) + return column + + def decode_batch(self, batch: "pl.DataFrame") -> "pl.DataFrame": + return self.decode_row(batch) + + +class PolarsFormatter(TensorFormatter[Mapping, "pl.DataFrame", Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + 
self.np_array_kwargs = np_array_kwargs + self.polars_arrow_extractor = PolarsArrowExtractor + self.polars_features_decoder = PolarsFeaturesDecoder(features) + import polars as pl # noqa: F401 - import pl at initialization + + def format_row(self, pa_table: pa.Table) -> "pl.DataFrame": + row = self.polars_arrow_extractor().extract_row(pa_table) + row = self.polars_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> "pl.Series": + column = self.polars_arrow_extractor().extract_column(pa_table) + column = self.polars_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> "pl.DataFrame": + row = self.polars_arrow_extractor().extract_batch(pa_table) + row = self.polars_features_decoder.decode_batch(row) + return row diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py b/venv/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..adb15cda3815d77fa0272562e83fda029d1babee --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import tensorflow as tf + + +class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): + def __init__(self, features=None, **tf_tensor_kwargs): + super().__init__(features=features) + self.tf_tensor_kwargs = tf_tensor_kwargs + import tensorflow as tf # noqa: F401 - import tf at initialization + + def _consolidate(self, column): + import tensorflow as tf + + if isinstance(column, list) and column: + if all( + isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return tf.stack(column) + elif all( + isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype + for x in column + ): + # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated + return tf.ragged.stack(column) + + return column + + def _tensorize(self, value): + import tensorflow as tf + + if value is None: + return value + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": tf.int64} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": tf.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import tensorflow as tf + + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # tf tensors cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "tf.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/venv/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py 
b/venv/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8efe759a1443a74b94d59fe38944f6527ac18cf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import torch + + +class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]): + def __init__(self, features=None, **torch_tensor_kwargs): + super().__init__(features=features) + self.torch_tensor_kwargs = torch_tensor_kwargs + import torch # noqa import torch at initialization + + def _consolidate(self, column): + import torch + + if isinstance(column, list) and column: + if all( + isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype + for x in column + ): + return torch.stack(column) + return column + + def _tensorize(self, value): + import torch + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": torch.int64} + + # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility. + # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss. + if value.dtype in [np.uint16, np.uint32]: + value = value.astype(np.int64) + + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": torch.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + if value.ndim == 2: + value = value[:, :, np.newaxis] + + value = value.transpose((2, 0, 1)) + return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import torch + + # support for torch, tf, jax etc. 
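For illustration only (not part of the file above): these tensor formatters are normally reached through `Dataset.with_format` rather than instantiated directly. A minimal sketch, assuming `datasets` and `torch` are installed.

    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
    ds = ds.with_format("torch")   # "pt" and "pytorch" are registered aliases for the torch formatter
    print(ds[0]["x"])              # tensor([1., 2.]) -- floating point values default to torch.float32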
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "torch.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/venv/lib/python3.10/site-packages/datasets/io/__init__.py b/venv/lib/python3.10/site-packages/datasets/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a5ecf5347c7ca499758612b854c3146d349ecf4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32f89e6dbd07e9b70d02848f70d2186d21a7982e Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb2cab2fd1562a8e5d91d1f68c5605093979286a Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81c79c13e1e5dd0267d26bd6e688c5037989d58f Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d30c8bdd0c8859c05c95ce9e14057b224c8d0de5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94036ba9e84c1d201dfdf9ff254a36c0db0b9751 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87563d6fb74317a063955e03048becbfe277e5e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18bf4efedb49cd1a99faac5c9cb5b87786e50d2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d438e0bb6b0fcb89d3fe05dc351d961c91fd1e0f Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/io/abc.py b/venv/lib/python3.10/site-packages/datasets/io/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..a1913cc20e3fd748ef912e2fb3d7c1e18f16ac8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/abc.py @@ -0,0 +1,53 @@ +from abc import ABC, abstractmethod +from typing import Optional, Union + +from .. 
import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit +from ..utils.typing import NestedDataStructureLike, PathLike + + +class AbstractDatasetReader(ABC): + def __init__( + self, + path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + self.path_or_paths = path_or_paths + self.split = split if split or isinstance(path_or_paths, dict) else "train" + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: + pass + + +class AbstractDatasetInputStream(ABC): + def __init__( + self, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, IterableDataset]: + pass diff --git a/venv/lib/python3.10/site-packages/datasets/io/csv.py b/venv/lib/python3.10/site-packages/datasets/io/csv.py new file mode 100644 index 0000000000000000000000000000000000000000..4ac2ea1135b579ae0f38f7dafc73ae5c2db1a6b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/csv.py @@ -0,0 +1,145 @@ +import multiprocessing +import os +from typing import BinaryIO, Optional, Union + +import fsspec + +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.csv.csv import Csv +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class CsvDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Csv( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class CsvDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + storage_options: Optional[dict] = None, + **to_csv_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = "utf-8" + self.storage_options = storage_options or {} + self.to_csv_kwargs = to_csv_kwargs + + def write(self) -> int: + _ = self.to_csv_kwargs.pop("path_or_buf", None) + header = self.to_csv_kwargs.pop("header", True) + index = self.to_csv_kwargs.pop("index", False) + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer: + written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs) + else: + written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs) + return written + + def _batch_csv(self, args): + offset, header, index, to_csv_kwargs = args + + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + csv_str = batch.to_pandas().to_csv( + path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs + ) + return csv_str.encode(self.encoding) + + def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int: + """Writes the pyarrow table as CSV to a binary file handle. + + Caller is responsible for opening and closing the handle. 
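CsvDatasetReader and CsvDatasetWriter are the machinery behind Dataset.from_csv and Dataset.to_csv; a hedged sketch of a round trip, with an illustrative file name:

from datasets import Dataset

Dataset.from_dict({"a": [1, 2, 3], "b": ["x", "y", "z"]}).to_csv("tiny.csv")  # CsvDatasetWriter, returns bytes written
ds = Dataset.from_csv("tiny.csv")                                             # CsvDatasetReader
print(ds.column_names, ds.num_rows)  # ['a', 'b'] 3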
+ """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating CSV from Arrow format", + ): + csv_str = self._batch_csv((offset, header, index, to_csv_kwargs)) + written += file_obj.write(csv_str) + + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for csv_str in hf_tqdm( + pool.imap( + self._batch_csv, + [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating CSV from Arrow format", + ): + written += file_obj.write(csv_str) + + return written diff --git a/venv/lib/python3.10/site-packages/datasets/io/generator.py b/venv/lib/python3.10/site-packages/datasets/io/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..2566d5fcdcca6525f67afe9106dcf9ba2842d92c --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/generator.py @@ -0,0 +1,57 @@ +from typing import Callable, Optional + +from .. import Features +from ..packaged_modules.generator.generator import Generator +from .abc import AbstractDatasetInputStream + + +class GeneratorDatasetInputStream(AbstractDatasetInputStream): + def __init__( + self, + generator: Callable, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + gen_kwargs: Optional[dict] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.builder = Generator( + cache_dir=cache_dir, + features=features, + generator=generator, + gen_kwargs=gen_kwargs, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split="train") + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset diff --git a/venv/lib/python3.10/site-packages/datasets/io/json.py b/venv/lib/python3.10/site-packages/datasets/io/json.py new file mode 100644 index 0000000000000000000000000000000000000000..c1d89b0fd04c83394e34104442776ba15628cb58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/json.py @@ -0,0 +1,170 @@ +import multiprocessing +import os +from typing import BinaryIO, Optional, Union + +import fsspec + +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.json.json import Json +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class JsonDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + field: Optional[str] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.field = field + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Json( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + field=field, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class JsonDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + storage_options: Optional[dict] = None, + **to_json_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = "utf-8" + self.storage_options = storage_options or {} + self.to_json_kwargs = to_json_kwargs + + def write(self) -> int: + _ = self.to_json_kwargs.pop("path_or_buf", None) + orient = self.to_json_kwargs.pop("orient", "records") + lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) + if "index" not in self.to_json_kwargs and orient in ["split", "table"]: + self.to_json_kwargs["index"] = False + + # Determine the default compression value based on self.path_or_buf type + default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None + compression = self.to_json_kwargs.pop("compression", default_compression) + + if compression not in [None, "infer", "gzip", "bz2", "xz"]: + raise NotImplementedError(f"`datasets` currently does not support {compression} compression") + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open( + self.path_or_buf, "wb", compression=compression, **(self.storage_options or {}) + ) as buffer: + written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) + else: + if compression: + raise NotImplementedError( + f"The compression parameter is not supported when writing to a buffer, but compression={compression}" + " 
was passed. Please provide a local path instead." + ) + written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) + return written + + def _batch_json(self, args): + offset, orient, lines, to_json_kwargs = args + + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) + if not json_str.endswith("\n"): + json_str += "\n" + return json_str.encode(self.encoding) + + def _write( + self, + file_obj: BinaryIO, + orient, + lines, + **to_json_kwargs, + ) -> int: + """Writes the pyarrow table as JSON lines to a binary file handle. + + Caller is responsible for opening and closing the handle. + """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating json from Arrow format", + ): + json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) + written += file_obj.write(json_str) + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for json_str in hf_tqdm( + pool.imap( + self._batch_json, + [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating json from Arrow format", + ): + written += file_obj.write(json_str) + + return written diff --git a/venv/lib/python3.10/site-packages/datasets/io/parquet.py b/venv/lib/python3.10/site-packages/datasets/io/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..51434106fb1f623500ceabbb6b0b3077fa20eee9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/parquet.py @@ -0,0 +1,158 @@ +import os +from typing import BinaryIO, Optional, Union + +import fsspec +import numpy as np +import pyarrow.parquet as pq + +from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config +from ..features.features import FeatureType, _visit +from ..formatting import query_table +from ..packaged_modules import _PACKAGED_DATASETS_MODULES +from ..packaged_modules.parquet.parquet import Parquet +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +def get_writer_batch_size(features: Features) -> Optional[int]: + """ + Get the writer_batch_size that defines the maximum row group size in the parquet files. + The default in `datasets` is 1,000 but we lower it to 100 for image datasets. + This allows to optimize random access to parquet file, since accessing 1 row requires + to read its entire row group. + + This can be improved to get optimized size for querying/iterating + but at least it matches the dataset viewer expectations on HF. + + Args: + ds_config_info (`datasets.info.DatasetInfo`): + Dataset info from `datasets`. + Returns: + writer_batch_size (`Optional[int]`): + Writer batch size to pass to a dataset builder. + If `None`, then it will use the `datasets` default. 
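The JSON reader and writer back Dataset.from_json and Dataset.to_json, which default to JSON Lines (orient="records", lines=True, matching JsonDatasetWriter.write). A minimal sketch with an illustrative file name:

from datasets import Dataset

Dataset.from_dict({"text": ["hello", "world"]}).to_json("tiny.jsonl")  # one JSON object per line
ds = Dataset.from_json("tiny.jsonl")
print(ds[0])  # {'text': 'hello'}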
+ """ + + batch_size = np.inf + + def set_batch_size(feature: FeatureType) -> None: + nonlocal batch_size + if isinstance(feature, Image): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) + elif isinstance(feature, Audio): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) + elif isinstance(feature, Value) and feature.dtype == "binary": + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) + + _visit(features, set_batch_size) + + return None if batch_size is np.inf else batch_size + + +class ParquetDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + hash = _PACKAGED_DATASETS_MODULES["parquet"][1] + self.builder = Parquet( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + hash=hash, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class ParquetDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + storage_options: Optional[dict] = None, + **parquet_writer_kwargs, + ): + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size or get_writer_batch_size(dataset.features) + self.storage_options = storage_options or {} + self.parquet_writer_kwargs = parquet_writer_kwargs + + def write(self) -> int: + batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer: + written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) + else: + written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) + return written + + def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: + """Writes the pyarrow table as Parquet to a binary file handle. + + Caller is responsible for opening and closing the handle. 
+ """ + written = 0 + _ = parquet_writer_kwargs.pop("path_or_buf", None) + schema = self.dataset.features.arrow_schema + + writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) + + for offset in hf_tqdm( + range(0, len(self.dataset), batch_size), + unit="ba", + desc="Creating parquet from Arrow format", + ): + batch = query_table( + table=self.dataset._data, + key=slice(offset, offset + batch_size), + indices=self.dataset._indices, + ) + writer.write_table(batch) + written += batch.nbytes + writer.close() + return written diff --git a/venv/lib/python3.10/site-packages/datasets/io/spark.py b/venv/lib/python3.10/site-packages/datasets/io/spark.py new file mode 100644 index 0000000000000000000000000000000000000000..7562ba1fb5f77ed8f82374e3021fcb3a93b1da8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/spark.py @@ -0,0 +1,57 @@ +from typing import Optional + +import pyspark + +from .. import Features, NamedSplit +from ..download import DownloadMode +from ..packaged_modules.spark.spark import Spark +from .abc import AbstractDatasetReader + + +class SparkDatasetReader(AbstractDatasetReader): + """A dataset reader that reads from a Spark DataFrame. + + When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be + provided. Streaming is not currently supported. + """ + + def __init__( + self, + df: pyspark.sql.DataFrame, + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + streaming: bool = True, + cache_dir: str = None, + keep_in_memory: bool = False, + working_dir: str = None, + load_from_cache_file: bool = True, + file_format: str = "arrow", + **kwargs, + ): + super().__init__( + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + **kwargs, + ) + self._load_from_cache_file = load_from_cache_file + self._file_format = file_format + self.builder = Spark( + df=df, + features=features, + cache_dir=cache_dir, + working_dir=working_dir, + **kwargs, + ) + + def read(self): + if self.streaming: + return self.builder.as_streaming_dataset(split=self.split) + download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD + self.builder.download_and_prepare( + download_mode=download_mode, + file_format=self._file_format, + ) + return self.builder.as_dataset(split=self.split) diff --git a/venv/lib/python3.10/site-packages/datasets/io/sql.py b/venv/lib/python3.10/site-packages/datasets/io/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..2331e3e6407fa81cd26569306ff2c7952dad4920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/sql.py @@ -0,0 +1,124 @@ +import multiprocessing +from typing import TYPE_CHECKING, Optional, Union + +from .. 
import Dataset, Features, config +from ..formatting import query_table +from ..packaged_modules.sql.sql import Sql +from ..utils import tqdm as hf_tqdm +from .abc import AbstractDatasetInputStream + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +class SqlDatasetReader(AbstractDatasetInputStream): + def __init__( + self, + sql: Union[str, "sqlalchemy.sql.Selectable"], + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + **kwargs, + ): + super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) + self.builder = Sql( + cache_dir=cache_dir, + features=features, + sql=sql, + con=con, + **kwargs, + ) + + def read(self): + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + base_path=base_path, + ) + + # Build dataset for splits + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class SqlDatasetWriter: + def __init__( + self, + dataset: Dataset, + name: str, + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + **to_sql_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.name = name + self.con = con + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.to_sql_kwargs = to_sql_kwargs + + def write(self) -> int: + _ = self.to_sql_kwargs.pop("sql", None) + _ = self.to_sql_kwargs.pop("con", None) + index = self.to_sql_kwargs.pop("index", False) + + written = self._write(index=index, **self.to_sql_kwargs) + return written + + def _batch_sql(self, args): + offset, index, to_sql_kwargs = args + to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + df = batch.to_pandas() + num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs) + return num_rows or len(df) + + def _write(self, index, **to_sql_kwargs) -> int: + """Writes the pyarrow table as SQL to a database. + + Caller is responsible for opening and closing the SQL connection. 
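SqlDatasetReader and SqlDatasetWriter sit behind Dataset.from_sql and Dataset.to_sql. A sketch against a SQLite database, assuming SQLAlchemy is installed so a connection URI string is accepted; the table name and URI are illustrative:

from datasets import Dataset

Dataset.from_dict({"a": [1, 2, 3]}).to_sql("items", "sqlite:///tiny.db")   # SqlDatasetWriter appends batches with df.to_sql
ds = Dataset.from_sql("SELECT a FROM items", "sqlite:///tiny.db")          # SqlDatasetReader builds a 'train' split
print(ds["a"])  # [1, 2, 3]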
+ """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating SQL from Arrow format", + ): + written += self._batch_sql((offset, index, to_sql_kwargs)) + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for num_rows in hf_tqdm( + pool.imap( + self._batch_sql, + [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating SQL from Arrow format", + ): + written += num_rows + + return written diff --git a/venv/lib/python3.10/site-packages/datasets/io/text.py b/venv/lib/python3.10/site-packages/datasets/io/text.py new file mode 100644 index 0000000000000000000000000000000000000000..58963f3c7ab1779d79226943e341200b782e1add --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/io/text.py @@ -0,0 +1,60 @@ +from typing import Optional + +from .. import Features, NamedSplit +from ..packaged_modules.text.text import Text +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class TextDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Text( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ca280c0b0efc95987be57a2349f7ef3fa2d2fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b8e969b626fbe08da80166726b03ff340ec7b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f83eee0006232407ee2c6161d6fbd4a562e1f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33fb5db638c6fc1feacd4065657fbb9030a5373c Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..24c32a746e8c15b23d048b39f7a88447ed0a1b2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py @@ -0,0 +1,406 @@ +import collections +import itertools +import os +from dataclasses import dataclass +from typing import List, Optional, Tuple, Type + +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj + +import datasets +from datasets.features.features import FeatureType +from datasets.tasks.base import TaskTemplate + + +logger = datasets.utils.logging.get_logger(__name__) + + +def count_path_segments(path): + return path.replace("\\", "/").count("/") + + +@dataclass +class FolderBasedBuilderConfig(datasets.BuilderConfig): + """BuilderConfig for AutoFolder.""" + + features: Optional[datasets.Features] = None + drop_labels: bool = None + drop_metadata: bool = None + + +class FolderBasedBuilder(datasets.GeneratorBasedBuilder): + """ + Base class for generic data loaders for vision and image data. + + + Abstract class attributes to be overridden by a child class: + BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) + BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) 
+ BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` + EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files + will be included in a dataset) + CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure + """ + + BASE_FEATURE: Type[FeatureType] + BASE_COLUMN_NAME: str + BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig + EXTENSIONS: List[str] + CLASSIFICATION_TASK: TaskTemplate + + METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + dl_manager.download_config.extract_on_the_fly = True + # Do an early pass if: + # * `drop_labels` is None (default) or False, to infer the class labels + # * `drop_metadata` is None (default) or False, to find the metadata files + do_analyze = not self.config.drop_labels or not self.config.drop_metadata + labels, path_depths = set(), set() + metadata_files = collections.defaultdict(set) + + def analyze(files_or_archives, downloaded_files_or_dirs, split): + if len(downloaded_files_or_dirs) == 0: + return + # The files are separated from the archives at this point, so check the first sample + # to see if it's a file or a directory and iterate accordingly + if os.path.isfile(downloaded_files_or_dirs[0]): + original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs + for original_file, downloaded_file in zip(original_files, downloaded_files): + original_file, downloaded_file = str(original_file), str(downloaded_file) + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(original_file))) + path_depths.add(count_path_segments(original_file)) + elif os.path.basename(original_file) in self.METADATA_FILENAMES: + metadata_files[split].add((original_file, downloaded_file)) + else: + original_file_name = os.path.basename(original_file) + logger.debug( + f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." + ) + else: + archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs + for archive, downloaded_dir in zip(archives, downloaded_dirs): + archive, downloaded_dir = str(archive), str(downloaded_dir) + for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) + path_depths.add(count_path_segments(downloaded_dir_file)) + elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: + metadata_files[split].add((None, downloaded_dir_file)) + else: + archive_file_name = os.path.basename(archive) + original_file_name = os.path.basename(downloaded_dir_file) + logger.debug( + f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." 
+ ) + + data_files = self.config.data_files + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files, archives = self._split_files_and_archives(files) + downloaded_files = dl_manager.download(files) + downloaded_dirs = dl_manager.download_and_extract(archives) + if do_analyze: # drop_metadata is None or False, drop_labels is None or False + logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") + analyze(files, downloaded_files, split_name) + analyze(archives, downloaded_dirs, split_name) + + if metadata_files: + # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False + add_metadata = not self.config.drop_metadata + # if `metadata_files` are found, add labels only if + # `drop_labels` is set up to False explicitly (not-default behavior) + add_labels = self.config.drop_labels is False + else: + # if `metadata_files` are not found, don't add metadata + add_metadata = False + # if `metadata_files` are not found and `drop_labels` is None (default) - + # add labels if files are on the same level in directory hierarchy and there is more than one label + add_labels = ( + (len(labels) > 1 and len(path_depths) == 1) + if self.config.drop_labels is None + else not self.config.drop_labels + ) + + if add_labels: + logger.info("Adding the labels inferred from data directories to the dataset's features...") + if add_metadata: + logger.info("Adding metadata to the dataset...") + else: + add_labels, add_metadata, metadata_files = False, False, {} + + splits.append( + datasets.SplitGenerator( + name=split_name, + gen_kwargs={ + "files": list(zip(files, downloaded_files)) + + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], + "metadata_files": metadata_files, + "split_name": split_name, + "add_labels": add_labels, + "add_metadata": add_metadata, + }, + ) + ) + + if add_metadata: + # Verify that: + # * all metadata files have the same set of features + # * the `file_name` key is one of the metadata keys and is of type string + features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] + + # Check that all metadata files share the same format + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] + for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) + } + if len(metadata_ext) > 1: + raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") + metadata_ext = metadata_ext.pop() + + for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + features_per_metadata_file.append( + (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) + ) + for downloaded_metadata_file, metadata_features in features_per_metadata_file: + if metadata_features != features_per_metadata_file[0][1]: + raise ValueError( + f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" + ) + metadata_features = features_per_metadata_file[0][1] + if "file_name" not in metadata_features: + raise ValueError("`file_name` must be present as dictionary key in metadata files") + if metadata_features["file_name"] != datasets.Value("string"): + raise ValueError("`file_name` key must be a string") + del metadata_features["file_name"] + else: 
+ metadata_features = None + + # Normally, we would do this in _info, but we need to know the labels and/or metadata + # before building the features + if self.config.features is None: + if add_labels: + self.info.features = datasets.Features( + { + self.BASE_COLUMN_NAME: self.BASE_FEATURE(), + "label": datasets.ClassLabel(names=sorted(labels)), + } + ) + self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] + else: + self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) + + if add_metadata: + # Warn if there are duplicated keys in metadata compared to the existing features + # (`BASE_COLUMN_NAME`, optionally "label") + duplicated_keys = set(self.info.features) & set(metadata_features) + if duplicated_keys: + logger.warning( + f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " + f"the features dictionary." + ) + # skip metadata duplicated keys + self.info.features.update( + { + feature: metadata_features[feature] + for feature in metadata_features + if feature not in duplicated_keys + } + ) + + return splits + + def _split_files_and_archives(self, data_files): + files, archives = [], [] + for data_file in data_files: + _, data_file_ext = os.path.splitext(data_file) + if data_file_ext.lower() in self.EXTENSIONS: + files.append(data_file) + elif os.path.basename(data_file) in self.METADATA_FILENAMES: + files.append(data_file) + else: + archives.append(data_file) + return files, archives + + def _read_metadata(self, metadata_file, metadata_ext: str = ""): + if metadata_ext == ".csv": + # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module + return pa.Table.from_pandas(pd.read_csv(metadata_file)) + else: + with open(metadata_file, "rb") as f: + return paj.read_json(f) + + def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): + split_metadata_files = metadata_files.get(split_name, []) + sample_empty_metadata = ( + {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} + ) + last_checked_dir = None + metadata_dir = None + metadata_dict = None + downloaded_metadata_file = None + + metadata_ext = "" + if split_metadata_files: + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files + } + metadata_ext = metadata_ext.pop() + + file_idx = 0 + for original_file, downloaded_file_or_dir in files: + if original_file is not None: + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + # If the file is a file of a needed type, and we've just entered a new directory, + # find the nereast metadata file (by counting path segments) for the directory + current_dir = os.path.dirname(original_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is not None # ignore metadata_files that are inside archives + and not os.path.relpath( + original_file, os.path.dirname(metadata_file_candidate) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = 
min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + if metadata_dir is not None and downloaded_metadata_file is not None: + file_relpath = os.path.relpath(original_file, metadata_dir) + file_relpath = file_relpath.replace("\\", "/") + if file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(original_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_file_or_dir, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 + else: + for downloaded_dir_file in downloaded_file_or_dir: + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(downloaded_dir_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is None # ignore metadata_files that are not inside archives + and not os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(downloaded_metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." 
+ ) + if metadata_dir is not None and downloaded_metadata_file is not None: + downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) + downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") + if downloaded_dir_file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[downloaded_dir_file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_dir_file, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1
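Concrete loaders fill in the abstract attributes documented on FolderBasedBuilder. The sketch below is modelled on the bundled imagefolder packaged module; the config class, extension list, and task arguments are condensed and should be read as illustrative rather than verbatim:

import datasets
from datasets.packaged_modules.folder_based_builder import folder_based_builder
from datasets.tasks import ImageClassification


class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for an ImageFolder-style loader (illustrative)."""
    drop_labels: bool = None
    drop_metadata: bool = None


class ImageFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Image                    # decode each matching file as an image
    BASE_COLUMN_NAME = "image"                       # column that receives the decoded feature
    BUILDER_CONFIG_CLASS = ImageFolderConfig
    EXTENSIONS = [".jpg", ".jpeg", ".png", ".bmp"]   # abbreviated extension list
    CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")

With such a builder registered as a packaged module, load_dataset("imagefolder", data_dir=...) infers labels from sub-directory names and merges any metadata.csv or metadata.jsonl it finds, following the _split_generators and _generate_examples logic above.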