applied-ai-018 committed
Commit af0b10f · verified · 1 parent: cd0431b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/datasets/commands/__init__.py +13 -0
  30. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/datasets/commands/convert.py +195 -0
  36. venv/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py +156 -0
  37. venv/lib/python3.10/site-packages/datasets/commands/datasets_cli.py +45 -0
  38. venv/lib/python3.10/site-packages/datasets/commands/dummy_data.py +468 -0
  39. venv/lib/python3.10/site-packages/datasets/commands/env.py +41 -0
  40. venv/lib/python3.10/site-packages/datasets/commands/run_beam.py +168 -0
  41. venv/lib/python3.10/site-packages/datasets/commands/test.py +201 -0
  42. venv/lib/python3.10/site-packages/datasets/formatting/__init__.py +139 -0
  43. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/datasets/formatting/formatting.py +653 -0
ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8289f624651c26b48ff5a21d447fab47c63817b5433b361bf73f3038133ac12f
+ size 50332843
ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ba335e43797248a530e3eb0108f602b6d515f1c55ae0ec0bbc572f8f75e4cf8
+ size 9372
ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af4d642574c5598700c678b71beb624e99a140212daab615bc5b243295baa3cb
+ size 9387
ckpts/universal/global_step20/zero/24.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:953100e3eaba559ce7382929b9d52d98680a68054aeb16930fdc0278b8db52af
+ size 9293
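
The four checkpoint tensors above are tracked with Git LFS, so the diff records only pointer files (version, oid, size) rather than the binary payloads; the exp_avg and exp_avg_sq names appear to be Adam-style first and second optimizer moments stored per parameter. As a minimal sketch (the path below is a placeholder), such a pointer can be parsed with plain Python:

# Minimal sketch: read a Git LFS pointer file into a dict of its fields.
# The path is a placeholder; every *.pt pointer in this commit has the same three keys.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# info = parse_lfs_pointer("exp_avg_sq.pt")
# print(info["oid"], info["size"])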
venv/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.91 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc ADDED
Binary file (232 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc ADDED
Binary file (24.4 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc ADDED
Binary file (73.6 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc ADDED
Binary file (78 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc ADDED
Binary file (9.12 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc ADDED
Binary file (6.79 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc ADDED
Binary file (28.1 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc ADDED
Binary file (98.9 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (1.69 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (3.55 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc ADDED
Binary file (17.7 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc ADDED
Binary file (22.9 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc ADDED
Binary file (23.5 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc ADDED
Binary file (91.4 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc ADDED
Binary file (3.42 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc ADDED
Binary file (86.4 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc ADDED
Binary file (2.86 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc ADDED
Binary file (33.5 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc ADDED
Binary file (23.1 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc ADDED
Binary file (4.93 kB).
 
venv/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc ADDED
Binary file (75.5 kB).
 
venv/lib/python3.10/site-packages/datasets/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
+ from abc import ABC, abstractmethod
+ from argparse import ArgumentParser
+
+
+ class BaseDatasetsCLICommand(ABC):
+     @staticmethod
+     @abstractmethod
+     def register_subcommand(parser: ArgumentParser):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def run(self):
+         raise NotImplementedError()
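
Every command module added in this commit implements the interface above. A minimal sketch of a subclass, for illustration only (HelloCommand and its greeting are invented and not part of this commit):

# Hypothetical subclass of BaseDatasetsCLICommand, for illustration only.
from argparse import ArgumentParser

from datasets.commands import BaseDatasetsCLICommand


class HelloCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers object created by the datasets-cli entry point.
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("Hello from datasets-cli")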
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (812 Bytes).
 
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc ADDED
Binary file (1.71 kB).
 
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (1.86 kB).
 
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc ADDED
Binary file (5.12 kB).
 
venv/lib/python3.10/site-packages/datasets/commands/convert.py ADDED
@@ -0,0 +1,195 @@
1
+ import os
2
+ import re
3
+ import shutil
4
+ from argparse import ArgumentParser, Namespace
5
+
6
+ from datasets.commands import BaseDatasetsCLICommand
7
+ from datasets.utils.logging import get_logger
8
+
9
+
10
+ HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
11
+
12
+ HIGHLIGHT_MESSAGE_POST = """=======
13
+ >>>>>>>
14
+ """
15
+
16
+ TO_HIGHLIGHT = [
17
+ "TextEncoderConfig",
18
+ "ByteTextEncoder",
19
+ "SubwordTextEncoder",
20
+ "encoder_config",
21
+ "maybe_build_from_corpus",
22
+ "manual_dir",
23
+ ]
24
+
25
+ TO_CONVERT = [
26
+ # (pattern, replacement)
27
+ # Order is important here for some replacements
28
+ (r"tfds\.core", r"datasets"),
29
+ (r"tf\.io\.gfile\.GFile", r"open"),
30
+ (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
31
+ (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
32
+ (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
33
+ (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
34
+ (r"tfds\.features\.FeaturesDict\(", r"dict("),
35
+ (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
36
+ (r"tfds\.", r"datasets."),
37
+ (r"dl_manager\.manual_dir", r"self.config.data_dir"),
38
+ (r"self\.builder_config", r"self.config"),
39
+ ]
40
+
41
+
42
+ def convert_command_factory(args: Namespace):
43
+ """
44
+ Factory function used to convert a TensorFlow Datasets dataset script to a HuggingFace Datasets dataset script.
45
+
46
+ Returns: ConvertCommand
47
+ """
48
+ return ConvertCommand(args.tfds_path, args.datasets_directory)
49
+
50
+
51
+ class ConvertCommand(BaseDatasetsCLICommand):
52
+ @staticmethod
53
+ def register_subcommand(parser: ArgumentParser):
54
+ """
55
+ Register this command to argparse so it's available for the datasets-cli
56
+
57
+ Args:
58
+ parser: Root parser to register command-specific arguments
59
+ """
60
+ train_parser = parser.add_parser(
61
+ "convert",
62
+ help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
63
+ )
64
+ train_parser.add_argument(
65
+ "--tfds_path",
66
+ type=str,
67
+ required=True,
68
+ help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
69
+ )
70
+ train_parser.add_argument(
71
+ "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
72
+ )
73
+ train_parser.set_defaults(func=convert_command_factory)
74
+
75
+ def __init__(self, tfds_path: str, datasets_directory: str, *args):
76
+ self._logger = get_logger("datasets-cli/converting")
77
+
78
+ self._tfds_path = tfds_path
79
+ self._datasets_directory = datasets_directory
80
+
81
+ def run(self):
82
+ if os.path.isdir(self._tfds_path):
83
+ abs_tfds_path = os.path.abspath(self._tfds_path)
84
+ elif os.path.isfile(self._tfds_path):
85
+ abs_tfds_path = os.path.dirname(self._tfds_path)
86
+ else:
87
+ raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
88
+
89
+ abs_datasets_path = os.path.abspath(self._datasets_directory)
90
+
91
+ self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
92
+
93
+ utils_files = []
94
+ with_manual_update = []
95
+ imports_to_builder_map = {}
96
+
97
+ if os.path.isdir(self._tfds_path):
98
+ file_names = os.listdir(abs_tfds_path)
99
+ else:
100
+ file_names = [os.path.basename(self._tfds_path)]
101
+
102
+ for f_name in file_names:
103
+ self._logger.info(f"Looking at file {f_name}")
104
+ input_file = os.path.join(abs_tfds_path, f_name)
105
+ output_file = os.path.join(abs_datasets_path, f_name)
106
+
107
+ if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
108
+ self._logger.info("Skipping file")
109
+ continue
110
+
111
+ with open(input_file, encoding="utf-8") as f:
112
+ lines = f.readlines()
113
+
114
+ out_lines = []
115
+ is_builder = False
116
+ needs_manual_update = False
117
+ tfds_imports = []
118
+ for line in lines:
119
+ out_line = line
120
+
121
+ # Convert imports
122
+ if "import tensorflow.compat.v2 as tf" in out_line:
123
+ continue
124
+ elif "@tfds.core" in out_line:
125
+ continue
126
+ elif "builder=self" in out_line:
127
+ continue
128
+ elif "import tensorflow_datasets.public_api as tfds" in out_line:
129
+ out_line = "import datasets\n"
130
+ elif "import tensorflow" in out_line:
131
+ # order is important here
132
+ out_line = ""
133
+ continue
134
+ elif "from absl import logging" in out_line:
135
+ out_line = "from datasets import logging\n"
136
+ elif "getLogger" in out_line:
137
+ out_line = out_line.replace("getLogger", "get_logger")
138
+ elif any(expression in out_line for expression in TO_HIGHLIGHT):
139
+ needs_manual_update = True
140
+ to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
141
+ out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
142
+ out_lines.append(out_line)
143
+ out_lines.append(HIGHLIGHT_MESSAGE_POST)
144
+ continue
145
+ else:
146
+ for pattern, replacement in TO_CONVERT:
147
+ out_line = re.sub(pattern, replacement, out_line)
148
+
149
+ # Take care of saving utilities (to later move them together with main script)
150
+ if "tensorflow_datasets" in out_line:
151
+ match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
152
+ tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
153
+ out_line = "from . import " + match.group(1)
154
+
155
+ # Check we have not forgotten anything
156
+ if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
157
+ raise ValueError(f"Error converting {out_line.strip()}")
158
+
159
+ if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
160
+ is_builder = True
161
+ out_lines.append(out_line)
162
+
163
+ if is_builder or "wmt" in f_name:
164
+ # We create a new directory for each dataset
165
+ dir_name = f_name.replace(".py", "")
166
+ output_dir = os.path.join(abs_datasets_path, dir_name)
167
+ output_file = os.path.join(output_dir, f_name)
168
+ os.makedirs(output_dir, exist_ok=True)
169
+ self._logger.info(f"Adding directory {output_dir}")
170
+ imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
171
+ else:
172
+ # Utilities will be moved at the end
173
+ utils_files.append(output_file)
174
+
175
+ if needs_manual_update:
176
+ with_manual_update.append(output_file)
177
+
178
+ with open(output_file, "w", encoding="utf-8") as f:
179
+ f.writelines(out_lines)
180
+ self._logger.info(f"Converted in {output_file}")
181
+
182
+ for utils_file in utils_files:
183
+ try:
184
+ f_name = os.path.basename(utils_file)
185
+ dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
186
+ self._logger.info(f"Moving {utils_file} to {dest_folder}")
187
+ shutil.copy(utils_file, dest_folder)
188
+ except KeyError:
189
+ self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
190
+
191
+ if with_manual_update:
192
+ for file_path in with_manual_update:
193
+ self._logger.warning(
194
+ f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
195
+ )
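
The conversion loop above is driven by the ordered TO_CONVERT regex table. A minimal sketch of that substitution pass, using a handful of the patterns from the file on an invented input line:

import re

# A subset of the (pattern, replacement) pairs from TO_CONVERT above; order matters.
to_convert = [
    (r"tfds\.core", r"datasets"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]

line = "text = tfds.features.Text()"  # invented sample input
for pattern, replacement in to_convert:
    line = re.sub(pattern, replacement, line)
print(line)  # text = datasets.Value('string')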
venv/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py ADDED
@@ -0,0 +1,156 @@
1
+ import time
2
+ from argparse import ArgumentParser
3
+ from typing import Optional
4
+
5
+ from huggingface_hub import HfApi, create_branch, get_repo_discussions
6
+
7
+ from datasets import get_dataset_config_names, get_dataset_default_config_name, load_dataset
8
+ from datasets.commands import BaseDatasetsCLICommand
9
+
10
+
11
+ def _command_factory(args):
12
+ return ConvertToParquetCommand(
13
+ args.dataset_id,
14
+ args.token,
15
+ args.revision,
16
+ args.trust_remote_code,
17
+ )
18
+
19
+
20
+ class ConvertToParquetCommand(BaseDatasetsCLICommand):
21
+ @staticmethod
22
+ def register_subcommand(parser):
23
+ parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet")
24
+ parser.add_argument("dataset_id", help="source dataset ID")
25
+ parser.add_argument("--token", help="access token to the Hugging Face Hub")
26
+ parser.add_argument("--revision", help="source revision")
27
+ parser.add_argument(
28
+ "--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script"
29
+ )
30
+ parser.set_defaults(func=_command_factory)
31
+
32
+ def __init__(
33
+ self,
34
+ dataset_id: str,
35
+ token: Optional[str],
36
+ revision: Optional[str],
37
+ trust_remote_code: bool,
38
+ ):
39
+ self._dataset_id = dataset_id
40
+ self._token = token
41
+ self._revision = revision
42
+ self._trust_remote_code = trust_remote_code
43
+
44
+ def run(self) -> None:
45
+ dataset_id = self._dataset_id
46
+ token = self._token
47
+ revision = self._revision
48
+ trust_remote_code = self._trust_remote_code
49
+ print(f"{dataset_id}")
50
+ configs = get_dataset_config_names(
51
+ dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code
52
+ )
53
+ print(f"{configs = }")
54
+ default_config = get_dataset_default_config_name(
55
+ dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code
56
+ )
57
+ print(f"{default_config = }")
58
+ if default_config:
59
+ config = default_config
60
+ configs.remove(default_config)
61
+ else:
62
+ config = configs.pop(0)
63
+ print(f"{config = }")
64
+ dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code)
65
+ commit_info = dataset.push_to_hub(
66
+ dataset_id,
67
+ config_name=config,
68
+ commit_message="Convert dataset to Parquet",
69
+ commit_description="Convert dataset to Parquet.",
70
+ create_pr=True,
71
+ token=token,
72
+ set_default=default_config is not None,
73
+ )
74
+ time.sleep(5)
75
+ if commit_info:
76
+ pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url
77
+ else:
78
+ pr_revision, pr_url = infer_pr(dataset_id, token=token)
79
+ for config in configs:
80
+ print(f"{config = }")
81
+ dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code)
82
+ dataset.push_to_hub(
83
+ dataset_id,
84
+ config_name=config,
85
+ commit_message=f"Add {config} data files",
86
+ revision=pr_revision,
87
+ token=token,
88
+ )
89
+ time.sleep(5)
90
+ delete_files(dataset_id, revision=pr_revision, token=token)
91
+ if not revision:
92
+ create_branch(dataset_id, branch="script", repo_type="dataset", token=token, exist_ok=True)
93
+ print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}")
94
+
95
+
96
+ def infer_pr(dataset_id, token=None):
97
+ discussions = get_repo_discussions(dataset_id, repo_type="dataset", token=token)
98
+ prs = [discussion for discussion in discussions if discussion.is_pull_request and discussion.status == "open"]
99
+ pr = sorted(prs, key=lambda pr: pr.num)[-1]
100
+ return pr.git_reference, pr.url
101
+
102
+
103
+ def delete_files(dataset_id, revision=None, token=None):
104
+ dataset_name = dataset_id.split("/")[-1]
105
+ hf_api = HfApi(token=token)
106
+ repo_files = hf_api.list_repo_files(
107
+ dataset_id,
108
+ repo_type="dataset",
109
+ )
110
+ if repo_files:
111
+ legacy_json_file = []
112
+ python_files = []
113
+ data_files = []
114
+ for filename in repo_files:
115
+ if filename in {".gitattributes", "README.md"}:
116
+ continue
117
+ elif filename == f"{dataset_name}.py":
118
+ hf_api.delete_file(
119
+ filename,
120
+ dataset_id,
121
+ repo_type="dataset",
122
+ revision=revision,
123
+ commit_message="Delete loading script",
124
+ )
125
+ elif filename == "dataset_infos.json":
126
+ legacy_json_file.append(filename)
127
+ elif filename.endswith(".py"):
128
+ python_files.append(filename)
129
+ else:
130
+ data_files.append(filename)
131
+ if legacy_json_file:
132
+ hf_api.delete_file(
133
+ "dataset_infos.json",
134
+ dataset_id,
135
+ repo_type="dataset",
136
+ revision=revision,
137
+ commit_message="Delete legacy dataset_infos.json",
138
+ )
139
+ if python_files:
140
+ for filename in python_files:
141
+ hf_api.delete_file(
142
+ filename,
143
+ dataset_id,
144
+ repo_type="dataset",
145
+ revision=revision,
146
+ commit_message="Delete loading script auxiliary file",
147
+ )
148
+ if data_files:
149
+ for filename in data_files:
150
+ hf_api.delete_file(
151
+ filename,
152
+ dataset_id,
153
+ repo_type="dataset",
154
+ revision=revision,
155
+ commit_message="Delete data file",
156
+ )
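
A hedged sketch of how this subcommand is driven once registered with the CLI entry point (datasets_cli.py, shown next); the repository id my-user/my-dataset is a placeholder, and run() stays commented out because it would push Parquet files to the Hub:

# Hypothetical wiring, mirroring datasets_cli.py below; the repo id is a placeholder.
from argparse import ArgumentParser

from datasets.commands.convert_to_parquet import ConvertToParquetCommand

parser = ArgumentParser("datasets-cli", usage="datasets-cli <command> [<args>]")
subparsers = parser.add_subparsers()
ConvertToParquetCommand.register_subcommand(subparsers)

args = parser.parse_args(["convert_to_parquet", "my-user/my-dataset", "--trust_remote_code"])
command = args.func(args)  # builds a ConvertToParquetCommand from the parsed arguments
# command.run()            # would load the dataset and open a Parquet conversion PR on the Hub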
venv/lib/python3.10/site-packages/datasets/commands/datasets_cli.py ADDED
@@ -0,0 +1,45 @@
+ #!/usr/bin/env python
+ from argparse import ArgumentParser
+
+ from datasets.commands.convert import ConvertCommand
+ from datasets.commands.convert_to_parquet import ConvertToParquetCommand
+ from datasets.commands.dummy_data import DummyDataCommand
+ from datasets.commands.env import EnvironmentCommand
+ from datasets.commands.run_beam import RunBeamCommand
+ from datasets.commands.test import TestCommand
+ from datasets.utils.logging import set_verbosity_info
+
+
+ def parse_unknown_args(unknown_args):
+     return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
+
+
+ def main():
+     parser = ArgumentParser(
+         "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
+     )
+     commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
+     set_verbosity_info()
+
+     # Register commands
+     ConvertCommand.register_subcommand(commands_parser)
+     EnvironmentCommand.register_subcommand(commands_parser)
+     TestCommand.register_subcommand(commands_parser)
+     RunBeamCommand.register_subcommand(commands_parser)
+     DummyDataCommand.register_subcommand(commands_parser)
+     ConvertToParquetCommand.register_subcommand(commands_parser)
+
+     # Parse args
+     args, unknown_args = parser.parse_known_args()
+     if not hasattr(args, "func"):
+         parser.print_help()
+         exit(1)
+     kwargs = parse_unknown_args(unknown_args)
+
+     # Run
+     service = args.func(args, **kwargs)
+     service.run()
+
+
+ if __name__ == "__main__":
+     main()
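
The parse_unknown_args helper above pairs leftover --key value tokens so command factories can receive extra builder kwargs; a small self-contained illustration with invented flag names:

# Same pairing logic as parse_unknown_args above, on an invented argument list.
unknown_args = ["--data_dir", "/tmp/data", "--num_shards", "4"]
kwargs = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
print(kwargs)  # {'data_dir': '/tmp/data', 'num_shards': '4'}  (values stay strings)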
venv/lib/python3.10/site-packages/datasets/commands/dummy_data.py ADDED
@@ -0,0 +1,468 @@
1
+ import fnmatch
2
+ import json
3
+ import os
4
+ import shutil
5
+ import tempfile
6
+ import xml.etree.ElementTree as ET
7
+ from argparse import ArgumentParser
8
+ from pathlib import Path
9
+ from typing import Optional
10
+
11
+ from datasets import config
12
+ from datasets.commands import BaseDatasetsCLICommand
13
+ from datasets.download.download_config import DownloadConfig
14
+ from datasets.download.download_manager import DownloadManager
15
+ from datasets.download.mock_download_manager import MockDownloadManager
16
+ from datasets.load import dataset_module_factory, import_main_class
17
+ from datasets.utils.deprecation_utils import deprecated
18
+ from datasets.utils.logging import get_logger, set_verbosity_warning
19
+ from datasets.utils.py_utils import map_nested
20
+
21
+
22
+ logger = get_logger(__name__)
23
+
24
+ DEFAULT_ENCODING = "utf-8"
25
+
26
+
27
+ def dummy_data_command_factory(args):
28
+ return DummyDataCommand(
29
+ args.path_to_dataset,
30
+ args.auto_generate,
31
+ args.n_lines,
32
+ args.json_field,
33
+ args.xml_tag,
34
+ args.match_text_files,
35
+ args.keep_uncompressed,
36
+ args.cache_dir,
37
+ args.encoding,
38
+ )
39
+
40
+
41
+ class DummyDataGeneratorDownloadManager(DownloadManager):
42
+ def __init__(self, mock_download_manager, *args, **kwargs):
43
+ super().__init__(*args, **kwargs)
44
+ self.mock_download_manager = mock_download_manager
45
+ self.downloaded_dummy_paths = []
46
+ self.expected_dummy_paths = []
47
+
48
+ def download(self, url_or_urls):
49
+ output = super().download(url_or_urls)
50
+ dummy_output = self.mock_download_manager.download(url_or_urls)
51
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
52
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
53
+ return output
54
+
55
+ def download_and_extract(self, url_or_urls):
56
+ output = super().extract(super().download(url_or_urls))
57
+ dummy_output = self.mock_download_manager.download(url_or_urls)
58
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
59
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
60
+ return output
61
+
62
+ def auto_generate_dummy_data_folder(
63
+ self,
64
+ n_lines: int = 5,
65
+ json_field: Optional[str] = None,
66
+ xml_tag: Optional[str] = None,
67
+ match_text_files: Optional[str] = None,
68
+ encoding: Optional[str] = None,
69
+ ) -> bool:
70
+ os.makedirs(
71
+ os.path.join(
72
+ self.mock_download_manager.datasets_scripts_dir,
73
+ self.mock_download_manager.dataset_name,
74
+ self.mock_download_manager.dummy_data_folder,
75
+ "dummy_data",
76
+ ),
77
+ exist_ok=True,
78
+ )
79
+ total = 0
80
+ self.mock_download_manager.load_existing_dummy_data = False
81
+ for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
82
+ dst_path = os.path.join(
83
+ self.mock_download_manager.datasets_scripts_dir,
84
+ self.mock_download_manager.dataset_name,
85
+ self.mock_download_manager.dummy_data_folder,
86
+ relative_dst_path,
87
+ )
88
+ total += self._create_dummy_data(
89
+ src_path,
90
+ dst_path,
91
+ n_lines=n_lines,
92
+ json_field=json_field,
93
+ xml_tag=xml_tag,
94
+ match_text_files=match_text_files,
95
+ encoding=encoding,
96
+ )
97
+ if total == 0:
98
+ logger.error(
99
+ "Dummy data generation failed: no dummy files were created. "
100
+ "Make sure the data files format is supported by the auto-generation."
101
+ )
102
+ return total > 0
103
+
104
+ def _create_dummy_data(
105
+ self,
106
+ src_path: str,
107
+ dst_path: str,
108
+ n_lines: int,
109
+ json_field: Optional[str] = None,
110
+ xml_tag: Optional[str] = None,
111
+ match_text_files: Optional[str] = None,
112
+ encoding: Optional[str] = None,
113
+ ) -> int:
114
+ encoding = encoding or DEFAULT_ENCODING
115
+ if os.path.isfile(src_path):
116
+ logger.debug(f"Trying to generate dummy data file {dst_path}")
117
+ dst_path_extensions = Path(dst_path).suffixes
118
+ line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
119
+ is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
120
+ if match_text_files is not None:
121
+ file_name = os.path.basename(dst_path)
122
+ for pattern in match_text_files.split(","):
123
+ is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
124
+ # Line by line text file (txt, csv etc.)
125
+ if is_line_by_line_text_file:
126
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
127
+ with open(src_path, encoding=encoding) as src_file:
128
+ with open(dst_path, "w", encoding=encoding) as dst_file:
129
+ first_lines = []
130
+ for i, line in enumerate(src_file):
131
+ if i >= n_lines:
132
+ break
133
+ first_lines.append(line)
134
+ dst_file.write("".join(first_lines).strip())
135
+ return 1
136
+ # json file
137
+ elif ".json" in dst_path_extensions:
138
+ with open(src_path, encoding=encoding) as src_file:
139
+ json_data = json.load(src_file)
140
+ if json_field is not None:
141
+ json_data = json_data[json_field]
142
+ if isinstance(json_data, dict):
143
+ if not all(isinstance(v, list) for v in json_data.values()):
144
+ raise ValueError(
145
+ f"Couldn't parse columns {list(json_data.keys())}. "
146
+ "Maybe specify which json field must be used "
147
+ "to read the data with --json_field <my_field>."
148
+ )
149
+ first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
150
+ else:
151
+ first_json_data = json_data[:n_lines]
152
+ if json_field is not None:
153
+ first_json_data = {json_field: first_json_data}
154
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
155
+ with open(dst_path, "w", encoding=encoding) as dst_file:
156
+ json.dump(first_json_data, dst_file)
157
+ return 1
158
+ # xml file
159
+ elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
160
+ if xml_tag is None:
161
+ logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
162
+ else:
163
+ self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
164
+ return 1
165
+ logger.warning(
166
+ f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
167
+ )
168
+ return 0
169
+ # directory, iterate through all files
170
+ elif os.path.isdir(src_path):
171
+ total = 0
172
+ for path, _, files in os.walk(src_path):
173
+ for name in files:
174
+ if not name.startswith("."): # ignore files like .DS_Store etc.
175
+ src_file_path = os.path.join(path, name)
176
+ dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
177
+ total += self._create_dummy_data(
178
+ src_file_path,
179
+ dst_file_path,
180
+ n_lines=n_lines,
181
+ json_field=json_field,
182
+ xml_tag=xml_tag,
183
+ match_text_files=match_text_files,
184
+ encoding=encoding,
185
+ )
186
+ return total
187
+
188
+ @staticmethod
189
+ def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
190
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
191
+ with open(src_path, encoding=encoding) as src_file:
192
+ n_line = 0
193
+ parents = []
194
+ for event, elem in ET.iterparse(src_file, events=("start", "end")):
195
+ if event == "start":
196
+ parents.append(elem)
197
+ else:
198
+ _ = parents.pop()
199
+ if elem.tag == xml_tag:
200
+ if n_line < n_lines:
201
+ n_line += 1
202
+ else:
203
+ if parents:
204
+ parents[-1].remove(elem)
205
+ ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
206
+
207
+ def compress_autogenerated_dummy_data(self, path_to_dataset):
208
+ root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
209
+ base_name = os.path.join(root_dir, "dummy_data")
210
+ base_dir = "dummy_data"
211
+ logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
212
+ shutil.make_archive(base_name, "zip", root_dir, base_dir)
213
+ shutil.rmtree(base_name)
214
+
215
+
216
+ @deprecated(
217
+ "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
218
+ )
219
+ class DummyDataCommand(BaseDatasetsCLICommand):
220
+ @staticmethod
221
+ def register_subcommand(parser: ArgumentParser):
222
+ test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
223
+ test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
224
+ test_parser.add_argument(
225
+ "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
226
+ )
227
+ test_parser.add_argument(
228
+ "--json_field",
229
+ type=str,
230
+ default=None,
231
+ help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
232
+ )
233
+ test_parser.add_argument(
234
+ "--xml_tag",
235
+ type=str,
236
+ default=None,
237
+ help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
238
+ )
239
+ test_parser.add_argument(
240
+ "--match_text_files",
241
+ type=str,
242
+ default=None,
243
+ help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
244
+ )
245
+ test_parser.add_argument(
246
+ "--keep_uncompressed",
247
+ action="store_true",
248
+ help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging, to make manual adjustments before compressing.",
249
+ )
250
+ test_parser.add_argument(
251
+ "--cache_dir",
252
+ type=str,
253
+ default=None,
254
+ help="Cache directory to download and cache files when auto-generating dummy data",
255
+ )
256
+ test_parser.add_argument(
257
+ "--encoding",
258
+ type=str,
259
+ default=None,
260
+ help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
261
+ )
262
+ test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
263
+ test_parser.set_defaults(func=dummy_data_command_factory)
264
+
265
+ def __init__(
266
+ self,
267
+ path_to_dataset: str,
268
+ auto_generate: bool,
269
+ n_lines: int,
270
+ json_field: Optional[str],
271
+ xml_tag: Optional[str],
272
+ match_text_files: Optional[str],
273
+ keep_uncompressed: bool,
274
+ cache_dir: Optional[str],
275
+ encoding: Optional[str],
276
+ ):
277
+ self._path_to_dataset = path_to_dataset
278
+ if os.path.isdir(path_to_dataset):
279
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
280
+ else:
281
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
282
+ cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
283
+ self._auto_generate = auto_generate
284
+ self._n_lines = n_lines
285
+ self._json_field = json_field
286
+ self._xml_tag = xml_tag
287
+ self._match_text_files = match_text_files
288
+ self._keep_uncompressed = keep_uncompressed
289
+ self._cache_dir = cache_dir
290
+ self._encoding = encoding
291
+
292
+ def run(self):
293
+ set_verbosity_warning()
294
+ dataset_module = dataset_module_factory(self._path_to_dataset)
295
+ builder_cls = import_main_class(dataset_module.module_path)
296
+
297
+ # use `None` as config if no configs
298
+ builder_configs = builder_cls.BUILDER_CONFIGS or [None]
299
+ auto_generate_results = []
300
+ with tempfile.TemporaryDirectory() as tmp_dir:
301
+ for builder_config in builder_configs:
302
+ config_name = builder_config.name if builder_config else None
303
+ dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
304
+ version = builder_config.version if builder_config else dataset_builder.config.version
305
+ mock_dl_manager = MockDownloadManager(
306
+ dataset_name=self._dataset_name,
307
+ config=builder_config,
308
+ version=version,
309
+ use_local_dummy_data=True,
310
+ load_existing_dummy_data=False,
311
+ )
312
+
313
+ if self._auto_generate:
314
+ auto_generate_results.append(
315
+ self._autogenerate_dummy_data(
316
+ dataset_builder=dataset_builder,
317
+ mock_dl_manager=mock_dl_manager,
318
+ keep_uncompressed=self._keep_uncompressed,
319
+ )
320
+ )
321
+ else:
322
+ self._print_dummy_data_instructions(
323
+ dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
324
+ )
325
+ if self._auto_generate and not self._keep_uncompressed:
326
+ if all(auto_generate_results):
327
+ print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
328
+ else:
329
+ print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
330
+
331
+ def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
332
+ dl_cache_dir = (
333
+ os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
334
+ if self._cache_dir
335
+ else config.DOWNLOADED_DATASETS_PATH
336
+ )
337
+ download_config = DownloadConfig(cache_dir=dl_cache_dir)
338
+ dl_manager = DummyDataGeneratorDownloadManager(
339
+ dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
340
+ )
341
+ dataset_builder._split_generators(dl_manager)
342
+ mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
343
+ dl_manager.auto_generate_dummy_data_folder(
344
+ n_lines=self._n_lines,
345
+ json_field=self._json_field,
346
+ xml_tag=self._xml_tag,
347
+ match_text_files=self._match_text_files,
348
+ encoding=self._encoding,
349
+ )
350
+ if not keep_uncompressed:
351
+ path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
352
+ dl_manager.compress_autogenerated_dummy_data(path_do_dataset)
353
+ # now test that the dummy_data.zip file actually works
354
+ mock_dl_manager.load_existing_dummy_data = True # use real dummy data
355
+ n_examples_per_split = {}
356
+ os.makedirs(dataset_builder._cache_dir, exist_ok=True)
357
+ try:
358
+ split_generators = dataset_builder._split_generators(mock_dl_manager)
359
+ for split_generator in split_generators:
360
+ dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
361
+ n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
362
+ except OSError as e:
363
+ logger.error(
364
+ f"Failed to load dummy data for config '{dataset_builder.config.name}'.\nOriginal error:\n"
365
+ + str(e)
366
+ )
367
+ return False
368
+ else:
369
+ if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
370
+ logger.warning(
371
+ f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}'."
372
+ )
373
+ return True
374
+ else:
375
+ empty_splits = [
376
+ split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
377
+ ]
378
+ logger.warning(
379
+ f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}'."
380
+ )
381
+ return False
382
+ else:
383
+ generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
384
+ logger.info(
385
+ f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
386
+ "Please compress this directory into a zip file to use it for dummy data tests."
387
+ )
388
+
389
+ def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
390
+ dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
391
+ logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
392
+ os.makedirs(dummy_data_folder, exist_ok=True)
393
+
394
+ try:
395
+ generator_splits = dataset_builder._split_generators(mock_dl_manager)
396
+ except FileNotFoundError as e:
397
+ print(
398
+ f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider opening files only in the method `_generate_examples(...)` instead. If this is not possible, the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
399
+ )
400
+
401
+ files_to_create = set()
402
+ split_names = []
403
+ dummy_file_name = mock_dl_manager.dummy_file_name
404
+
405
+ for split in generator_splits:
406
+ logger.info(f"Collecting dummy data file paths to create for {split.name}")
407
+ split_names.append(split.name)
408
+ gen_kwargs = split.gen_kwargs
409
+ generator = dataset_builder._generate_examples(**gen_kwargs)
410
+
411
+ try:
412
+ dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
413
+ config_string = (
414
+ f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
415
+ )
416
+ dummy_data_guidance_print += (
417
+ "- In order to create the dummy data for "
418
+ + config_string
419
+ + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
420
+ )
421
+
422
+ # trigger generate function
423
+ for key, record in generator:
424
+ pass
425
+
426
+ dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
427
+
428
+ except FileNotFoundError as e:
429
+ files_to_create.add(e.filename)
430
+
431
+ split_names = ", ".join(split_names)
432
+ if len(files_to_create) > 0:
433
+ # no glob.glob(...) in `_generate_examples(...)`
434
+ if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
435
+ dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
436
+ files_string = dummy_file_name
437
+ else:
438
+ files_string = ", ".join(files_to_create)
439
+ dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
440
+
441
+ dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
442
+
443
+ dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
444
+
445
+ if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
446
+ dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
447
+
448
+ dummy_data_guidance_print += (
449
+ f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
450
+ )
451
+
452
+ dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
453
+ else:
454
+ dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
455
+
456
+ dummy_data_guidance_print += (
457
+ f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
458
+ )
459
+
460
+ dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
461
+
462
+ dummy_data_guidance_print += (
463
+ f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
464
+ )
465
+
466
+ dummy_data_guidance_print += 83 * "=" + "\n"
467
+
468
+ print(dummy_data_guidance_print)
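
For line-by-line text formats, the generator above keeps only the first n_lines of each source file, mirroring the --n_lines option. A self-contained sketch of that truncation step, with placeholder file paths:

# Standalone sketch of the text-file branch of _create_dummy_data above; paths are placeholders.
def truncate_text_file(src_path: str, dst_path: str, n_lines: int = 5, encoding: str = "utf-8") -> None:
    with open(src_path, encoding=encoding) as src_file, open(dst_path, "w", encoding=encoding) as dst_file:
        first_lines = []
        for i, line in enumerate(src_file):
            if i >= n_lines:
                break
            first_lines.append(line)
        dst_file.write("".join(first_lines).strip())

# truncate_text_file("train.csv", "dummy_data/train.csv", n_lines=5)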
venv/lib/python3.10/site-packages/datasets/commands/env.py ADDED
@@ -0,0 +1,41 @@
+ import platform
+ from argparse import ArgumentParser
+
+ import fsspec
+ import huggingface_hub
+ import pandas
+ import pyarrow
+
+ from datasets import __version__ as version
+ from datasets.commands import BaseDatasetsCLICommand
+
+
+ def info_command_factory(_):
+     return EnvironmentCommand()
+
+
+ class EnvironmentCommand(BaseDatasetsCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         download_parser = parser.add_parser("env", help="Print relevant system environment info.")
+         download_parser.set_defaults(func=info_command_factory)
+
+     def run(self):
+         info = {
+             "`datasets` version": version,
+             "Platform": platform.platform(),
+             "Python version": platform.python_version(),
+             "`huggingface_hub` version": huggingface_hub.__version__,
+             "PyArrow version": pyarrow.__version__,
+             "Pandas version": pandas.__version__,
+             "`fsspec` version": fsspec.__version__,
+         }
+
+         print("\nCopy-and-paste the text below in your GitHub issue.\n")
+         print(self.format_dict(info))
+
+         return info
+
+     @staticmethod
+     def format_dict(d):
+         return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
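
The env command simply pretty-prints a dict of library versions; a quick illustration of the format_dict output shape, using invented version strings:

# Invented version strings, used only to show what format_dict above prints.
info = {"`datasets` version": "2.x.y", "Python version": "3.10.x"}
print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n")
# - `datasets` version: 2.x.y
# - Python version: 3.10.x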
venv/lib/python3.10/site-packages/datasets/commands/run_beam.py ADDED
@@ -0,0 +1,168 @@
1
+ import os
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+ from shutil import copyfile
5
+ from typing import List
6
+
7
+ from datasets import config
8
+ from datasets.builder import DatasetBuilder
9
+ from datasets.commands import BaseDatasetsCLICommand
10
+ from datasets.download.download_config import DownloadConfig
11
+ from datasets.download.download_manager import DownloadMode
12
+ from datasets.load import dataset_module_factory, import_main_class
13
+ from datasets.utils.deprecation_utils import deprecated
14
+ from datasets.utils.info_utils import VerificationMode
15
+
16
+
17
+ def run_beam_command_factory(args, **kwargs):
18
+ return RunBeamCommand(
19
+ args.dataset,
20
+ args.name,
21
+ args.cache_dir,
22
+ args.beam_pipeline_options,
23
+ args.data_dir,
24
+ args.all_configs,
25
+ args.save_info or args.save_infos,
26
+ args.ignore_verifications,
27
+ args.force_redownload,
28
+ **kwargs,
29
+ )
30
+
31
+
32
+ @deprecated(
33
+ "`BeamBasedBuilder` and `datasets-cli run_beam` are deprecated and will be removed in v3.0.0. Please use `GeneratorBasedBuilder` or `ArrowBasedBuilder` instead."
34
+ )
35
+ class RunBeamCommand(BaseDatasetsCLICommand):
36
+ @staticmethod
37
+ def register_subcommand(parser: ArgumentParser):
38
+ run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
39
+ run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
40
+ run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
41
+ run_beam_parser.add_argument(
42
+ "--cache_dir",
43
+ type=str,
44
+ default=None,
45
+ help="Cache directory where the datasets are stored",
46
+ )
47
+ run_beam_parser.add_argument(
48
+ "--beam_pipeline_options",
49
+ type=str,
50
+ default="",
51
+ help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`",
52
+ )
53
+ run_beam_parser.add_argument(
54
+ "--data_dir",
55
+ type=str,
56
+ default=None,
57
+ help="Can be used to specify a manual directory to get the files from",
58
+ )
59
+ run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
60
+ run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
61
+ run_beam_parser.add_argument(
62
+ "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
63
+ )
64
+ run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
65
+ # aliases
66
+ run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
67
+ run_beam_parser.set_defaults(func=run_beam_command_factory)
68
+
69
+ def __init__(
70
+ self,
71
+ dataset: str,
72
+ name: str,
73
+ cache_dir: str,
74
+ beam_pipeline_options: str,
75
+ data_dir: str,
76
+ all_configs: bool,
77
+ save_infos: bool,
78
+ ignore_verifications: bool,
79
+ force_redownload: bool,
80
+ **config_kwargs,
81
+ ):
82
+ self._dataset = dataset
83
+ self._name = name
84
+ self._cache_dir = cache_dir
85
+ self._beam_pipeline_options = beam_pipeline_options
86
+ self._data_dir = data_dir
87
+ self._all_configs = all_configs
88
+ self._save_infos = save_infos
89
+ self._ignore_verifications = ignore_verifications
90
+ self._force_redownload = force_redownload
91
+ self._config_kwargs = config_kwargs
92
+
93
+ def run(self):
94
+ import apache_beam as beam
95
+
96
+ if self._name is not None and self._all_configs:
97
+ print("Both parameters `name` and `all_configs` can't be used at once.")
98
+ exit(1)
99
+ path, config_name = self._dataset, self._name
100
+ dataset_module = dataset_module_factory(path)
101
+ builder_cls = import_main_class(dataset_module.module_path)
102
+ builders: List[DatasetBuilder] = []
103
+ if self._beam_pipeline_options:
104
+ beam_options = beam.options.pipeline_options.PipelineOptions(
105
+ flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
106
+ )
107
+ else:
108
+ beam_options = None
109
+ if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
110
+ for builder_config in builder_cls.BUILDER_CONFIGS:
111
+ builders.append(
112
+ builder_cls(
113
+ config_name=builder_config.name,
114
+ data_dir=self._data_dir,
115
+ hash=dataset_module.hash,
116
+ beam_options=beam_options,
117
+ cache_dir=self._cache_dir,
118
+ base_path=dataset_module.builder_kwargs.get("base_path"),
119
+ )
120
+ )
121
+ else:
122
+ builders.append(
123
+ builder_cls(
124
+ config_name=config_name,
125
+ data_dir=self._data_dir,
126
+ beam_options=beam_options,
127
+ cache_dir=self._cache_dir,
128
+ base_path=dataset_module.builder_kwargs.get("base_path"),
129
+ **self._config_kwargs,
130
+ )
131
+ )
132
+
133
+ for builder in builders:
134
+ builder.download_and_prepare(
135
+ download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
136
+ if not self._force_redownload
137
+ else DownloadMode.FORCE_REDOWNLOAD,
138
+ download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
139
+ verification_mode=VerificationMode.NO_CHECKS
140
+ if self._ignore_verifications
141
+ else VerificationMode.ALL_CHECKS,
142
+ )
143
+ if self._save_infos:
144
+ builder._save_infos()
145
+
146
+ print("Apache beam run successful.")
147
+
148
+ # If save_infos=True, the dataset infos file is created next to the loaded module file.
149
+ # Let's move it to the original directory of the dataset script, to allow the user to
150
+ # upload them on S3 at the same time afterwards.
151
+ if self._save_infos:
152
+ dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
153
+
154
+ name = Path(path).name + ".py"
155
+
156
+ combined_path = os.path.join(path, name)
157
+ if os.path.isfile(path):
158
+ dataset_dir = os.path.dirname(path)
159
+ elif os.path.isfile(combined_path):
160
+ dataset_dir = path
161
+ else: # in case of a remote dataset
162
+ print(f"Dataset Infos file saved at {dataset_infos_path}")
163
+ exit(1)
164
+
165
+ # Move datasetinfo back to the user
166
+ user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
167
+ copyfile(dataset_infos_path, user_dataset_infos_path)
168
+ print(f"Dataset Infos file saved at {user_dataset_infos_path}")
venv/lib/python3.10/site-packages/datasets/commands/test.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from argparse import ArgumentParser
4
+ from pathlib import Path
5
+ from shutil import copyfile, rmtree
6
+ from typing import Generator
7
+
8
+ import datasets.config
9
+ from datasets.builder import DatasetBuilder
10
+ from datasets.commands import BaseDatasetsCLICommand
11
+ from datasets.download.download_manager import DownloadMode
12
+ from datasets.load import dataset_module_factory, import_main_class
13
+ from datasets.utils.info_utils import VerificationMode
14
+ from datasets.utils.logging import ERROR, get_logger
15
+
16
+
17
+ logger = get_logger(__name__)
18
+
19
+
20
+ def _test_command_factory(args):
21
+ return TestCommand(
22
+ args.dataset,
23
+ args.name,
24
+ args.cache_dir,
25
+ args.data_dir,
26
+ args.all_configs,
27
+ args.save_info or args.save_infos,
28
+ args.ignore_verifications,
29
+ args.force_redownload,
30
+ args.clear_cache,
31
+ args.num_proc,
32
+ )
33
+
34
+
35
+ class TestCommand(BaseDatasetsCLICommand):
36
+ __test__ = False # to tell pytest it's not a test class
37
+
38
+ @staticmethod
39
+ def register_subcommand(parser: ArgumentParser):
40
+ test_parser = parser.add_parser("test", help="Test dataset implementation.")
41
+ test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
42
+ test_parser.add_argument(
43
+ "--cache_dir",
44
+ type=str,
45
+ default=None,
46
+ help="Cache directory where the datasets are stored.",
47
+ )
48
+ test_parser.add_argument(
49
+ "--data_dir",
50
+ type=str,
51
+ default=None,
52
+ help="Can be used to specify a manual directory to get the files from.",
53
+ )
54
+ test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
55
+ test_parser.add_argument(
56
+ "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
57
+ )
58
+ test_parser.add_argument(
59
+ "--ignore_verifications",
60
+ action="store_true",
61
+ help="Run the test without checksums and splits checks.",
62
+ )
63
+ test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
64
+ test_parser.add_argument(
65
+ "--clear_cache",
66
+ action="store_true",
67
+ help="Remove downloaded files and cached datasets after each config test",
68
+ )
69
+ test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes")
70
+ # aliases
71
+ test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
72
+ test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
73
+ test_parser.set_defaults(func=_test_command_factory)
74
+
75
+ def __init__(
76
+ self,
77
+ dataset: str,
78
+ name: str,
79
+ cache_dir: str,
80
+ data_dir: str,
81
+ all_configs: bool,
82
+ save_infos: bool,
83
+ ignore_verifications: bool,
84
+ force_redownload: bool,
85
+ clear_cache: bool,
86
+ num_proc: int,
87
+ ):
88
+ self._dataset = dataset
89
+ self._name = name
90
+ self._cache_dir = cache_dir
91
+ self._data_dir = data_dir
92
+ self._all_configs = all_configs
93
+ self._save_infos = save_infos
94
+ self._ignore_verifications = ignore_verifications
95
+ self._force_redownload = force_redownload
96
+ self._clear_cache = clear_cache
97
+ self._num_proc = num_proc
98
+ if clear_cache and not cache_dir:
99
+ print(
100
+ "When --clear_cache is used, specifying a cache directory is mandatory.\n"
101
+ "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
102
+ "Please provide a --cache_dir that will be used to test the dataset script."
103
+ )
104
+ exit(1)
105
+ if save_infos:
106
+ self._ignore_verifications = True
107
+
108
+ def run(self):
109
+ logging.getLogger("filelock").setLevel(ERROR)
110
+ if self._name is not None and self._all_configs:
111
+ print("Both parameters `config` and `all_configs` can't be used at once.")
112
+ exit(1)
113
+ path, config_name = self._dataset, self._name
114
+ module = dataset_module_factory(path)
115
+ builder_cls = import_main_class(module.module_path)
116
+ n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
117
+
118
+ def get_builders() -> Generator[DatasetBuilder, None, None]:
119
+ if self._all_configs and builder_cls.BUILDER_CONFIGS:
120
+ for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
121
+ if "config_name" in module.builder_kwargs:
122
+ yield builder_cls(
123
+ cache_dir=self._cache_dir,
124
+ data_dir=self._data_dir,
125
+ **module.builder_kwargs,
126
+ )
127
+ else:
128
+ yield builder_cls(
129
+ config_name=config.name,
130
+ cache_dir=self._cache_dir,
131
+ data_dir=self._data_dir,
132
+ **module.builder_kwargs,
133
+ )
134
+ else:
135
+ if "config_name" in module.builder_kwargs:
136
+ yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
137
+ else:
138
+ yield builder_cls(
139
+ config_name=config_name,
140
+ cache_dir=self._cache_dir,
141
+ data_dir=self._data_dir,
142
+ **module.builder_kwargs,
143
+ )
144
+
145
+ for j, builder in enumerate(get_builders()):
146
+ print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
147
+ builder._record_infos = os.path.exists(
148
+ os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
149
+ ) # record checksums only if we need to update a (deprecated) dataset_infos.json
150
+ builder.download_and_prepare(
151
+ download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
152
+ if not self._force_redownload
153
+ else DownloadMode.FORCE_REDOWNLOAD,
154
+ verification_mode=VerificationMode.NO_CHECKS
155
+ if self._ignore_verifications
156
+ else VerificationMode.ALL_CHECKS,
157
+ try_from_hf_gcs=False,
158
+ num_proc=self._num_proc,
159
+ )
160
+ builder.as_dataset()
161
+ if self._save_infos:
162
+ builder._save_infos()
163
+
164
+ # If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
165
+ # The dataset_infos are saved in the YAML part of the README.md
166
+
167
+ # Let's move it to the original directory of the dataset script, to allow the user to
168
+ # upload them on S3 at the same time afterwards.
169
+ if self._save_infos:
170
+ dataset_readme_path = os.path.join(
171
+ builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
172
+ )
173
+ name = Path(path).name + ".py"
174
+ combined_path = os.path.join(path, name)
175
+ if os.path.isfile(path):
176
+ dataset_dir = os.path.dirname(path)
177
+ elif os.path.isfile(combined_path):
178
+ dataset_dir = path
179
+ elif os.path.isdir(path): # for local directories containing only data files
180
+ dataset_dir = path
181
+ else: # in case of a remote dataset
182
+ dataset_dir = None
183
+ print(f"Dataset card saved at {dataset_readme_path}")
184
+
185
+ # Move the dataset card back to the user
186
+ if dataset_dir is not None:
187
+ user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
188
+ copyfile(dataset_readme_path, user_dataset_readme_path)
189
+ print(f"Dataset card saved at {user_dataset_readme_path}")
190
+
191
+ # If clear_cache=True, the download folder and the dataset builder cache directory are deleted
192
+ if self._clear_cache:
193
+ if os.path.isdir(builder._cache_dir):
194
+ logger.warning(f"Clearing cache at {builder._cache_dir}")
195
+ rmtree(builder._cache_dir)
196
+ download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
197
+ if os.path.isdir(download_dir):
198
+ logger.warning(f"Clearing cache at {download_dir}")
199
+ rmtree(download_dir)
200
+
201
+ print("Test successful.")
venv/lib/python3.10/site-packages/datasets/formatting/__init__.py ADDED
@@ -0,0 +1,139 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ruff: noqa
16
+
17
+ from typing import Dict, List, Optional, Type
18
+
19
+ from .. import config
20
+ from ..utils import logging
21
+ from .formatting import (
22
+ ArrowFormatter,
23
+ CustomFormatter,
24
+ Formatter,
25
+ PandasFormatter,
26
+ PythonFormatter,
27
+ TensorFormatter,
28
+ format_table,
29
+ query_table,
30
+ )
31
+ from .np_formatter import NumpyFormatter
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
37
+ _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
38
+ _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
39
+
40
+
41
+ def _register_formatter(
42
+ formatter_cls: type,
43
+ format_type: Optional[str],
44
+ aliases: Optional[List[str]] = None,
45
+ ):
46
+ """
47
+ Register a Formatter object using a name and optional aliases.
48
+ This function must be used on a Formatter class.
49
+ """
50
+ aliases = aliases if aliases is not None else []
51
+ if format_type in _FORMAT_TYPES:
52
+ logger.warning(
53
+ f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
54
+ )
55
+ _FORMAT_TYPES[format_type] = formatter_cls
56
+ for alias in set(aliases + [format_type]):
57
+ if alias in _FORMAT_TYPES_ALIASES:
58
+ logger.warning(
59
+ f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
60
+ )
61
+ _FORMAT_TYPES_ALIASES[alias] = format_type
62
+
63
+
64
+ def _register_unavailable_formatter(
65
+ unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
66
+ ):
67
+ """
68
+ Register an unavailable Formatter object using a name and optional aliases.
69
+ This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
70
+ """
71
+ aliases = aliases if aliases is not None else []
72
+ for alias in set(aliases + [format_type]):
73
+ _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
74
+
75
+
76
+ # Here we define all the available formatting functions that can be used by `Dataset.set_format`
77
+ _register_formatter(PythonFormatter, None, aliases=["python"])
78
+ _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
79
+ _register_formatter(NumpyFormatter, "numpy", aliases=["np"])
80
+ _register_formatter(PandasFormatter, "pandas", aliases=["pd"])
81
+ _register_formatter(CustomFormatter, "custom")
82
+
83
+ if config.POLARS_AVAILABLE:
84
+ from .polars_formatter import PolarsFormatter
85
+
86
+ _register_formatter(PolarsFormatter, "polars", aliases=["pl"])
87
+ else:
88
+ _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.")
89
+ _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"])
90
+
91
+ if config.TORCH_AVAILABLE:
92
+ from .torch_formatter import TorchFormatter
93
+
94
+ _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
95
+ else:
96
+ _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
97
+ _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
98
+
99
+ if config.TF_AVAILABLE:
100
+ from .tf_formatter import TFFormatter
101
+
102
+ _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
103
+ else:
104
+ _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
105
+ _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
106
+
107
+ if config.JAX_AVAILABLE:
108
+ from .jax_formatter import JaxFormatter
109
+
110
+ _register_formatter(JaxFormatter, "jax", aliases=[])
111
+ else:
112
+ _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
113
+ _register_unavailable_formatter(_jax_error, "jax", aliases=[])
114
+
115
+
116
+ def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
117
+ """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
118
+ if format_type in _FORMAT_TYPES_ALIASES:
119
+ return _FORMAT_TYPES_ALIASES[format_type]
120
+ else:
121
+ return format_type
122
+
123
+
124
+ def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
125
+ """
126
+ Factory function to get a Formatter given its type name and keyword arguments.
127
+ A formatter is an object that extracts and formats data from a pyarrow Table.
128
+ It defines the formatting for rows, columns and batches.
129
+ If the formatter for a given type name doesn't exist or is not available, an error is raised.
130
+ """
131
+ format_type = get_format_type_from_alias(format_type)
132
+ if format_type in _FORMAT_TYPES:
133
+ return _FORMAT_TYPES[format_type](**format_kwargs)
134
+ if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
135
+ raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
136
+ else:
137
+ raise ValueError(
138
+ f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
139
+ )
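As a quick illustration (not part of the diff) of how this registry is consumed, the sketch below resolves an alias and instantiates a formatter; it only uses the two public helpers defined above and assumes NumPy is installed, which is a hard dependency of `datasets`.

# Hedged sketch: resolving aliases and fetching formatters from the registry above.
from datasets.formatting import get_format_type_from_alias, get_formatter

assert get_format_type_from_alias("np") == "numpy"   # alias -> canonical type name

numpy_formatter = get_formatter("numpy")   # NumpyFormatter instance
default_formatter = get_formatter(None)    # PythonFormatter, the default format

# Requesting an unavailable backend re-raises the registered error, e.g. the
# ValueError registered for "torch" when PyTorch is not installed.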
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.09 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc ADDED
Binary file (5.5 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc ADDED
Binary file (4.07 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc ADDED
Binary file (3.97 kB). View file
 
venv/lib/python3.10/site-packages/datasets/formatting/formatting.py ADDED
@@ -0,0 +1,653 @@
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import operator
16
+ from collections.abc import Mapping, MutableMapping
17
+ from functools import partial
18
+
19
+ # Lint as: python3
20
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
21
+
22
+ import numpy as np
23
+ import pandas as pd
24
+ import pyarrow as pa
25
+ from packaging import version
26
+
27
+ from .. import config
28
+ from ..features import Features
29
+ from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
30
+ from ..table import Table
31
+ from ..utils.py_utils import no_op_if_value_is_null
32
+
33
+
34
+ T = TypeVar("T")
35
+
36
+ RowFormat = TypeVar("RowFormat")
37
+ ColumnFormat = TypeVar("ColumnFormat")
38
+ BatchFormat = TypeVar("BatchFormat")
39
+
40
+
41
+ def _is_range_contiguous(key: range) -> bool:
42
+ return key.step == 1 and key.stop >= key.start
43
+
44
+
45
+ def _raise_bad_key_type(key: Any):
46
+ raise TypeError(
47
+ f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
48
+ )
49
+
50
+
51
+ def _query_table_with_indices_mapping(
52
+ table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
53
+ ) -> pa.Table:
54
+ """
55
+ Query a pyarrow Table to extract the subtable that corresponds to the given key.
56
+ The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
57
+ account a shuffling or an indices selection, for example.
58
+ The indices table must contain one column named "indices" of type uint64.
59
+ """
60
+ if isinstance(key, int):
61
+ key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
62
+ return _query_table(table, key)
63
+ if isinstance(key, slice):
64
+ key = range(*key.indices(indices.num_rows))
65
+ if isinstance(key, range):
66
+ if _is_range_contiguous(key) and key.start >= 0:
67
+ return _query_table(
68
+ table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
69
+ )
70
+ else:
71
+ pass # treat as an iterable
72
+ if isinstance(key, str):
73
+ table = table.select([key])
74
+ return _query_table(table, indices.column(0).to_pylist())
75
+ if isinstance(key, Iterable):
76
+ return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])
77
+
78
+ _raise_bad_key_type(key)
79
+
80
+
81
+ def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
82
+ """
83
+ Query a pyarrow Table to extract the subtable that corresponds to the given key.
84
+ """
85
+ if isinstance(key, int):
86
+ return table.fast_slice(key % table.num_rows, 1)
87
+ if isinstance(key, slice):
88
+ key = range(*key.indices(table.num_rows))
89
+ if isinstance(key, range):
90
+ if _is_range_contiguous(key) and key.start >= 0:
91
+ return table.fast_slice(key.start, key.stop - key.start)
92
+ else:
93
+ pass # treat as an iterable
94
+ if isinstance(key, str):
95
+ return table.table.drop([column for column in table.column_names if column != key])
96
+ if isinstance(key, Iterable):
97
+ key = np.fromiter(key, np.int64)
98
+ if len(key) == 0:
99
+ return table.table.slice(0, 0)
100
+ # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
101
+ return table.fast_gather(key % table.num_rows)
102
+
103
+ _raise_bad_key_type(key)
104
+
105
+
106
+ def _is_array_with_nulls(pa_array: pa.Array) -> bool:
107
+ return pa_array.null_count > 0
108
+
109
+
110
+ class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
111
+ """
112
+ Arrow extractors are used to extract data from pyarrow tables.
113
+ They make it possible to extract rows, columns and batches.
114
+ These three extraction types have to be implemented.
115
+ """
116
+
117
+ def extract_row(self, pa_table: pa.Table) -> RowFormat:
118
+ raise NotImplementedError
119
+
120
+ def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
121
+ raise NotImplementedError
122
+
123
+ def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
124
+ raise NotImplementedError
125
+
126
+
127
+ def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
128
+ """Return the first element of a batch (dict) as a row (dict)"""
129
+ return {key: array[0] for key, array in py_dict.items()}
130
+
131
+
132
+ class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
133
+ def extract_row(self, pa_table: pa.Table) -> pa.Table:
134
+ return pa_table
135
+
136
+ def extract_column(self, pa_table: pa.Table) -> pa.Array:
137
+ return pa_table.column(0)
138
+
139
+ def extract_batch(self, pa_table: pa.Table) -> pa.Table:
140
+ return pa_table
141
+
142
+
143
+ class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
144
+ def extract_row(self, pa_table: pa.Table) -> dict:
145
+ return _unnest(pa_table.to_pydict())
146
+
147
+ def extract_column(self, pa_table: pa.Table) -> list:
148
+ return pa_table.column(0).to_pylist()
149
+
150
+ def extract_batch(self, pa_table: pa.Table) -> dict:
151
+ return pa_table.to_pydict()
152
+
153
+
154
+ class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
155
+ def __init__(self, **np_array_kwargs):
156
+ self.np_array_kwargs = np_array_kwargs
157
+
158
+ def extract_row(self, pa_table: pa.Table) -> dict:
159
+ return _unnest(self.extract_batch(pa_table))
160
+
161
+ def extract_column(self, pa_table: pa.Table) -> np.ndarray:
162
+ return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
163
+
164
+ def extract_batch(self, pa_table: pa.Table) -> dict:
165
+ return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
166
+
167
+ def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
168
+ if isinstance(pa_array, pa.ChunkedArray):
169
+ if isinstance(pa_array.type, _ArrayXDExtensionType):
170
+ # don't call to_pylist() to preserve dtype of the fixed-size array
171
+ zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
172
+ array: List = [
173
+ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
174
+ ]
175
+ else:
176
+ zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
177
+ not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
178
+ )
179
+ array: List = [
180
+ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
181
+ ]
182
+ else:
183
+ if isinstance(pa_array.type, _ArrayXDExtensionType):
184
+ # don't call to_pylist() to preserve dtype of the fixed-size array
185
+ zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
186
+ array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
187
+ else:
188
+ zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
189
+ array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
190
+ if len(array) > 0:
191
+ if any(
192
+ (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
193
+ or (isinstance(x, float) and np.isnan(x))
194
+ for x in array
195
+ ):
196
+ return np.array(array, copy=False, dtype=object)
197
+ return np.array(array, copy=False)
198
+
199
+
200
+ class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
201
+ def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
202
+ return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
203
+
204
+ def extract_column(self, pa_table: pa.Table) -> pd.Series:
205
+ return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
206
+
207
+ def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
208
+ return pa_table.to_pandas(types_mapper=pandas_types_mapper)
209
+
210
+
211
+ class PythonFeaturesDecoder:
212
+ def __init__(self, features: Optional[Features]):
213
+ self.features = features
214
+
215
+ def decode_row(self, row: dict) -> dict:
216
+ return self.features.decode_example(row) if self.features else row
217
+
218
+ def decode_column(self, column: list, column_name: str) -> list:
219
+ return self.features.decode_column(column, column_name) if self.features else column
220
+
221
+ def decode_batch(self, batch: dict) -> dict:
222
+ return self.features.decode_batch(batch) if self.features else batch
223
+
224
+
225
+ class PandasFeaturesDecoder:
226
+ def __init__(self, features: Optional[Features]):
227
+ self.features = features
228
+
229
+ def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
230
+ decode = (
231
+ {
232
+ column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
233
+ for column_name, feature in self.features.items()
234
+ if self.features._column_requires_decoding[column_name]
235
+ }
236
+ if self.features
237
+ else {}
238
+ )
239
+ if decode:
240
+ row[list(decode.keys())] = row.transform(decode)
241
+ return row
242
+
243
+ def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
244
+ decode = (
245
+ no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
246
+ if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
247
+ else None
248
+ )
249
+ if decode:
250
+ column = column.transform(decode)
251
+ return column
252
+
253
+ def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
254
+ return self.decode_row(batch)
255
+
256
+
257
+ class LazyDict(MutableMapping):
258
+ """A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
259
+
260
+ def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
261
+ self.pa_table = pa_table
262
+ self.formatter = formatter
263
+
264
+ self.data = {key: None for key in pa_table.column_names}
265
+ self.keys_to_format = set(self.data.keys())
266
+
267
+ def __len__(self):
268
+ return len(self.data)
269
+
270
+ def __getitem__(self, key):
271
+ value = self.data[key]
272
+ if key in self.keys_to_format:
273
+ value = self.format(key)
274
+ self.data[key] = value
275
+ self.keys_to_format.remove(key)
276
+ return value
277
+
278
+ def __setitem__(self, key, value):
279
+ if key in self.keys_to_format:
280
+ self.keys_to_format.remove(key)
281
+ self.data[key] = value
282
+
283
+ def __delitem__(self, key) -> None:
284
+ if key in self.keys_to_format:
285
+ self.keys_to_format.remove(key)
286
+ del self.data[key]
287
+
288
+ def __iter__(self):
289
+ return iter(self.data)
290
+
291
+ def __contains__(self, key):
292
+ return key in self.data
293
+
294
+ def __repr__(self):
295
+ self._format_all()
296
+ return repr(self.data)
297
+
298
+ if config.PY_VERSION >= version.parse("3.9"):
299
+ # merging with the union ("|") operator is supported in Python 3.9+
300
+
301
+ def __or__(self, other):
302
+ if isinstance(other, LazyDict):
303
+ inst = self.copy()
304
+ other = other.copy()
305
+ other._format_all()
306
+ inst.keys_to_format -= other.data.keys()
307
+ inst.data = inst.data | other.data
308
+ return inst
309
+ if isinstance(other, dict):
310
+ inst = self.copy()
311
+ inst.keys_to_format -= other.keys()
312
+ inst.data = inst.data | other
313
+ return inst
314
+ return NotImplemented
315
+
316
+ def __ror__(self, other):
317
+ if isinstance(other, LazyDict):
318
+ inst = self.copy()
319
+ other = other.copy()
320
+ other._format_all()
321
+ inst.keys_to_format -= other.data.keys()
322
+ inst.data = other.data | inst.data
323
+ return inst
324
+ if isinstance(other, dict):
325
+ inst = self.copy()
326
+ inst.keys_to_format -= other.keys()
327
+ inst.data = other | inst.data
328
+ return inst
329
+ return NotImplemented
330
+
331
+ def __ior__(self, other):
332
+ if isinstance(other, LazyDict):
333
+ other = other.copy()
334
+ other._format_all()
335
+ self.keys_to_format -= other.data.keys()
336
+ self.data |= other.data
337
+ else:
338
+ self.keys_to_format -= other.keys()
339
+ self.data |= other
340
+ return self
341
+
342
+ def __copy__(self):
343
+ # Identical to `UserDict.__copy__`
344
+ inst = self.__class__.__new__(self.__class__)
345
+ inst.__dict__.update(self.__dict__)
346
+ # Create a copy and avoid triggering descriptors
347
+ inst.__dict__["data"] = self.__dict__["data"].copy()
348
+ inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
349
+ return inst
350
+
351
+ def copy(self):
352
+ import copy
353
+
354
+ return copy.copy(self)
355
+
356
+ @classmethod
357
+ def fromkeys(cls, iterable, value=None):
358
+ raise NotImplementedError
359
+
360
+ def format(self, key):
361
+ raise NotImplementedError
362
+
363
+ def _format_all(self):
364
+ for key in self.keys_to_format:
365
+ self.data[key] = self.format(key)
366
+ self.keys_to_format.clear()
367
+
368
+
369
+ class LazyRow(LazyDict):
370
+ def format(self, key):
371
+ return self.formatter.format_column(self.pa_table.select([key]))[0]
372
+
373
+
374
+ class LazyBatch(LazyDict):
375
+ def format(self, key):
376
+ return self.formatter.format_column(self.pa_table.select([key]))
377
+
378
+
379
+ class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
380
+ """
381
+ A formatter is an object that extracts and formats data from pyarrow tables.
382
+ It defines the formatting for rows, columns and batches.
383
+ """
384
+
385
+ simple_arrow_extractor = SimpleArrowExtractor
386
+ python_arrow_extractor = PythonArrowExtractor
387
+ numpy_arrow_extractor = NumpyArrowExtractor
388
+ pandas_arrow_extractor = PandasArrowExtractor
389
+
390
+ def __init__(self, features: Optional[Features] = None):
391
+ self.features = features
392
+ self.python_features_decoder = PythonFeaturesDecoder(self.features)
393
+ self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
394
+
395
+ def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
396
+ if query_type == "row":
397
+ return self.format_row(pa_table)
398
+ elif query_type == "column":
399
+ return self.format_column(pa_table)
400
+ elif query_type == "batch":
401
+ return self.format_batch(pa_table)
402
+
403
+ def format_row(self, pa_table: pa.Table) -> RowFormat:
404
+ raise NotImplementedError
405
+
406
+ def format_column(self, pa_table: pa.Table) -> ColumnFormat:
407
+ raise NotImplementedError
408
+
409
+ def format_batch(self, pa_table: pa.Table) -> BatchFormat:
410
+ raise NotImplementedError
411
+
412
+
413
+ class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
414
+ def recursive_tensorize(self, data_struct: dict):
415
+ raise NotImplementedError
416
+
417
+
418
+ class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
419
+ def format_row(self, pa_table: pa.Table) -> pa.Table:
420
+ return self.simple_arrow_extractor().extract_row(pa_table)
421
+
422
+ def format_column(self, pa_table: pa.Table) -> pa.Array:
423
+ return self.simple_arrow_extractor().extract_column(pa_table)
424
+
425
+ def format_batch(self, pa_table: pa.Table) -> pa.Table:
426
+ return self.simple_arrow_extractor().extract_batch(pa_table)
427
+
428
+
429
+ class PythonFormatter(Formatter[Mapping, list, Mapping]):
430
+ def __init__(self, features=None, lazy=False):
431
+ super().__init__(features)
432
+ self.lazy = lazy
433
+
434
+ def format_row(self, pa_table: pa.Table) -> Mapping:
435
+ if self.lazy:
436
+ return LazyRow(pa_table, self)
437
+ row = self.python_arrow_extractor().extract_row(pa_table)
438
+ row = self.python_features_decoder.decode_row(row)
439
+ return row
440
+
441
+ def format_column(self, pa_table: pa.Table) -> list:
442
+ column = self.python_arrow_extractor().extract_column(pa_table)
443
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
444
+ return column
445
+
446
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
447
+ if self.lazy:
448
+ return LazyBatch(pa_table, self)
449
+ batch = self.python_arrow_extractor().extract_batch(pa_table)
450
+ batch = self.python_features_decoder.decode_batch(batch)
451
+ return batch
452
+
453
+
454
+ class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
455
+ def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
456
+ row = self.pandas_arrow_extractor().extract_row(pa_table)
457
+ row = self.pandas_features_decoder.decode_row(row)
458
+ return row
459
+
460
+ def format_column(self, pa_table: pa.Table) -> pd.Series:
461
+ column = self.pandas_arrow_extractor().extract_column(pa_table)
462
+ column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
463
+ return column
464
+
465
+ def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
466
+ row = self.pandas_arrow_extractor().extract_batch(pa_table)
467
+ row = self.pandas_features_decoder.decode_batch(row)
468
+ return row
469
+
470
+
471
+ class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
472
+ """
473
+ A user-defined custom formatter function defined by a ``transform``.
474
+ The transform must take as input a batch of data extracted from an arrow table using the python extractor,
475
+ and return a batch.
476
+ If the output batch is not a dict, then output_all_columns won't work.
477
+ If the output batch has several fields, then querying a single column won't work since we don't know which field
478
+ to return.
479
+ """
480
+
481
+ def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs):
482
+ super().__init__(features=features)
483
+ self.transform = transform
484
+
485
+ def format_row(self, pa_table: pa.Table) -> dict:
486
+ formatted_batch = self.format_batch(pa_table)
487
+ try:
488
+ return _unnest(formatted_batch)
489
+ except Exception as exc:
490
+ raise TypeError(
491
+ f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
492
+ ) from exc
493
+
494
+ def format_column(self, pa_table: pa.Table) -> ColumnFormat:
495
+ formatted_batch = self.format_batch(pa_table)
496
+ if hasattr(formatted_batch, "keys"):
497
+ if len(formatted_batch.keys()) > 1:
498
+ raise TypeError(
499
+ "Tried to query a column but the custom formatting function returns too many columns. "
500
+ f"Only one column was expected but got columns {list(formatted_batch.keys())}."
501
+ )
502
+ else:
503
+ raise TypeError(
504
+ f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
505
+ )
506
+ try:
507
+ return formatted_batch[pa_table.column_names[0]]
508
+ except Exception as exc:
509
+ raise TypeError(
510
+ f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
511
+ ) from exc
512
+
513
+ def format_batch(self, pa_table: pa.Table) -> dict:
514
+ batch = self.python_arrow_extractor().extract_batch(pa_table)
515
+ batch = self.python_features_decoder.decode_batch(batch)
516
+ return self.transform(batch)
517
+
518
+
519
+ def _check_valid_column_key(key: str, columns: List[str]) -> None:
520
+ if key not in columns:
521
+ raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
522
+
523
+
524
+ def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
525
+ if isinstance(key, int):
526
+ if (key < 0 and key + size < 0) or (key >= size):
527
+ raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
528
+ return
529
+ elif isinstance(key, slice):
530
+ pass
531
+ elif isinstance(key, range):
532
+ if len(key) > 0:
533
+ _check_valid_index_key(max(key), size=size)
534
+ _check_valid_index_key(min(key), size=size)
535
+ elif isinstance(key, Iterable):
536
+ if len(key) > 0:
537
+ _check_valid_index_key(int(max(key)), size=size)
538
+ _check_valid_index_key(int(min(key)), size=size)
539
+ else:
540
+ _raise_bad_key_type(key)
541
+
542
+
543
+ def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
544
+ if isinstance(key, int):
545
+ return "row"
546
+ elif isinstance(key, str):
547
+ return "column"
548
+ elif isinstance(key, (slice, range, Iterable)):
549
+ return "batch"
550
+ _raise_bad_key_type(key)
551
+
552
+
553
+ def query_table(
554
+ table: Table,
555
+ key: Union[int, slice, range, str, Iterable],
556
+ indices: Optional[Table] = None,
557
+ ) -> pa.Table:
558
+ """
559
+ Query a Table to extract the subtable that corresponds to the given key.
560
+
561
+ Args:
562
+ table (``datasets.table.Table``): The input Table to query from
563
+ key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
564
+ - an integer i: the subtable containing only the i-th row
565
+ - a slice [i:j:k]: the subtable containing the rows that correspond to this slice
566
+ - a range(i, j, k): the subtable containing the rows that correspond to this range
567
+ - a string c: the subtable containing all the rows but only the column c
568
+ - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
569
+ indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
570
+ The indices table must contain one column named "indices" of type uint64.
571
+ This is used in case of shuffling or rows selection.
572
+
573
+
574
+ Returns:
575
+ ``pyarrow.Table``: the result of the query on the input table
576
+ """
577
+ # Check if key is valid
578
+ if not isinstance(key, (int, slice, range, str, Iterable)):
579
+ try:
580
+ key = operator.index(key)
581
+ except TypeError:
582
+ _raise_bad_key_type(key)
583
+ if isinstance(key, str):
584
+ _check_valid_column_key(key, table.column_names)
585
+ else:
586
+ size = indices.num_rows if indices is not None else table.num_rows
587
+ _check_valid_index_key(key, size)
588
+ # Query the main table
589
+ if indices is None:
590
+ pa_subtable = _query_table(table, key)
591
+ else:
592
+ pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
593
+ return pa_subtable
594
+
595
+
596
+ def format_table(
597
+ table: Table,
598
+ key: Union[int, slice, range, str, Iterable],
599
+ formatter: Formatter,
600
+ format_columns: Optional[list] = None,
601
+ output_all_columns=False,
602
+ ):
603
+ """
604
+ Format a Table depending on the key that was used and a Formatter object.
605
+
606
+ Args:
607
+ table (``datasets.table.Table``): The input Table to format
608
+ key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
609
+ the table as either a row, a column or a batch.
610
+ formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
611
+ PythonFormatter, NumpyFormatter, etc.
612
+ format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
613
+ given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
614
+ output_all_columns (:obj:`bool`, defaults to False): If True, the formatted output is completed using the columns
615
+ that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.
616
+
617
+
618
+ Returns:
619
+ A row, column or batch formatted object defined by the Formatter:
620
+ - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
621
+ - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
622
+ - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
623
+ - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
624
+ - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
625
+ """
626
+ if isinstance(table, Table):
627
+ pa_table = table.table
628
+ else:
629
+ pa_table = table
630
+ query_type = key_to_query_type(key)
631
+ python_formatter = PythonFormatter(features=formatter.features)
632
+ if format_columns is None:
633
+ return formatter(pa_table, query_type=query_type)
634
+ elif query_type == "column":
635
+ if key in format_columns:
636
+ return formatter(pa_table, query_type)
637
+ else:
638
+ return python_formatter(pa_table, query_type=query_type)
639
+ else:
640
+ pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
641
+ formatted_output = formatter(pa_table_to_format, query_type=query_type)
642
+ if output_all_columns:
643
+ if isinstance(formatted_output, MutableMapping):
644
+ pa_table_with_remaining_columns = pa_table.drop(
645
+ col for col in pa_table.column_names if col in format_columns
646
+ )
647
+ remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
648
+ formatted_output.update(remaining_columns_dict)
649
+ else:
650
+ raise TypeError(
651
+ f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
652
+ )
653
+ return formatted_output
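To make the division of labour between query_table and format_table concrete, here is a minimal sketch (not part of the diff). It assumes `datasets.table.InMemoryTable` only to build a toy table; note that format_table expects the already-queried subtable and uses the key solely to pick the query type (row, column or batch), which matches how the two functions are chained in `Dataset.__getitem__`.

# Hedged sketch: query_table slices, format_table formats the resulting subtable.
from datasets.formatting.formatting import PythonFormatter, format_table, query_table
from datasets.table import InMemoryTable  # assumption: used here only to build a toy table

table = InMemoryTable.from_pydict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
formatter = PythonFormatter()

pa_subtable = query_table(table, 1)                       # int key -> one-row pyarrow Table
row = format_table(pa_subtable, 1, formatter=formatter)   # -> {"text": "b", "label": 1}

pa_subtable = query_table(table, "text")                  # str key -> single-column table
column = format_table(pa_subtable, "text", formatter=formatter)  # -> ["a", "b", "c"]

pa_subtable = query_table(table, slice(0, 2))             # slice key -> batch
batch = format_table(pa_subtable, slice(0, 2), formatter=formatter)
# -> {"text": ["a", "b"], "label": [0, 1]}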